Error deploying a sentiment analysis service with Docker and testing it locally




I am trying to deploy a sentiment analysis service with Docker and test it locally.



I am using Ubuntu 16.04.



I am able to run main.py in one terminal and get a prediction from another terminal with this curl command:

curl -H "Content-Type: application/json" -X POST -d '{"Sentences": ["i like programming"] }' http://localhost:8080

(screenshot: testing main.py with the curl command, without Docker)



But when I build and run it locally with Docker and send the same curl command (to port 8081), I get an error.


$ docker build -t sentiment-service .
$ docker run -it --rm -p 8081:80 sentiment-service
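For reference, this is how I send the same request from Python against the container (a minimal sketch; it assumes the requests package is installed and the docker run command above is still running):

import requests

# POST the same JSON payload as the curl command, to the host port mapped by docker run (8081 -> 80).
resp = requests.post('http://localhost:8081',
                     json={'Sentences': ['i like programming']})
print(resp.status_code, resp.text)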



(screenshot: error thrown after building and testing with Docker locally)



How can I resolve this error so that I get predictions from the local Docker deployment with the same curl command shown in the first screenshot?



The directory structure is as follows, and the contents of each file are given below.
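Only the files referenced by the Dockerfile and the code are listed here, so this is an approximate sketch of the layout:

.
├── Dockerfile
├── nginx.conf
├── supervisord.conf
└── service
    ├── main.py
    ├── predictor.py
    ├── lexicon.pickle
    └── model.h5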



main.py


from flask import Flask, current_app, request, jsonify
import io
import tensorflow as tf
import pickle
import gc
from predictor import Predictor
import logging

global graph, model
graph = tf.get_default_graph()
from keras.models import load_model
#from keras import backend as K

lexicon_file = open('lexicon.pickle', mode='r')
lexicon = pickle.load(lexicon_file)

model = load_model('model.h5')
model.summary()
predictor = Predictor(model=model, lexicon=lexicon)

app = Flask(__name__)

@app.before_first_request
def setup_logging():
    if not app.debug:
        # In production mode, add log handler to sys.stderr.
        app.logger.addHandler(logging.StreamHandler())
        app.logger.setLevel(logging.INFO)

@app.route('/', methods=['POST'])
def predict():
    data = {}
    try:
        input_str_list = request.get_json()['sentences']
    except Exception:
        return jsonify(status_code='400', msg='Bad Request'), 400
    with graph.as_default():
        predictions = predictor.predict(input_str_list).tolist()
    current_app.logger.info('Predictions: %s', predictions)
    return jsonify(predictions=predictions)


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080, debug=False)
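Note that predict() reads the lowercase key 'sentences' from the posted JSON, so a minimal request body it accepts looks like this:

{"sentences": ["i like programming"]}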



predictor.py


import numpy as np
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer


class Predictor:
    def __init__(self, model, lexicon):
        self.model = model
        self.lexicon = lexicon
        self.lemmatizer = WordNetLemmatizer()

    def _get_feature_vector(self, input_str_list):
        featureset = np.empty((0, len(self.lexicon)))

        for i, input_str in enumerate(input_str_list):
            current_words = word_tokenize(input_str.lower())
            current_words = [self.lemmatizer.lemmatize(i) for i in current_words]
            features = np.zeros(len(self.lexicon))

            for word in current_words:
                if word.lower() in self.lexicon:
                    index_value = self.lexicon.index(word.lower())
                    features[index_value] += 1
            features = features / np.max(features)
            featureset = np.append(featureset, np.array([features]), axis=0)

        return featureset

    def predict(self, input_str_list):
        featureset = self._get_feature_vector(input_str_list)

        assert featureset.shape[0] > 0

        result = self.model.predict(featureset)

        return result
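For completeness, this is how I sanity-check the predictor outside Flask (a minimal sketch; it assumes lexicon.pickle and model.h5 are in the current working directory, as in main.py):

import pickle

from keras.models import load_model

from predictor import Predictor

with open('lexicon.pickle', mode='r') as f:  # same mode as main.py; use 'rb' for binary pickle protocols
    lexicon = pickle.load(f)

model = load_model('model.h5')
predictor = Predictor(model=model, lexicon=lexicon)

# Expect a numpy array with one row of class scores.
print(predictor.predict(['i like programming']))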



nginx.conf


daemon off;
error_log /dev/stdout info;
worker_processes 1;

# user nobody nogroup;
pid /tmp/nginx.pid;

events {
    worker_connections 1024;
    accept_mutex off;
}

http {
    include mime.types;
    default_type application/octet-stream;
    access_log /dev/stdout combined;
    sendfile on;

    upstream app_server {
        # For a TCP configuration:
        server 127.0.0.1:5000 fail_timeout=0;
    }

    server {
        listen 80 default;
        client_max_body_size 4G;
        server_name _;

        keepalive_timeout 5;

        # path for static files
        root /opt/app/static;

        location / {
            # checks for static file, if not found proxy to app
            try_files $uri @proxy_to_app;
        }

        location @proxy_to_app {
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Host $http_host;
            proxy_redirect off;

            proxy_pass http://app_server;
        }
    }
}



supervisord.conf


[supervisord]
nodaemon = true

[program:nginx]
command = /usr/sbin/nginx
startsecs = 60
stdout_events_enabled = true
stderr_events_enabled = true

[program:app-gunicorn]
command = /opt/venv/bin/gunicorn main:app -w 3 -b 0.0.0.0:5000 --log-level=info --chdir=/opt/app -t 150
;command = /opt/venv/bin/python /opt/app/main.py
autostart = true
autorestart = true
stdout_events_enabled = true
stderr_events_enabled = true

[eventlistener:stdout]
command = supervisor_stdout
buffer_size = 1000
events = PROCESS_LOG
result_handler = supervisor_stdout:event_handler



Dockerfile


FROM ubuntu:16.04

RUN apt-get update --fix-missing

# Install virtualenv, nginx, supervisor
RUN apt-get install -y python-pip python-virtualenv
RUN apt-get install -y nginx supervisor

RUN service supervisor stop

# create virtual env and install dependencies
RUN virtualenv /opt/venv

RUN /opt/venv/bin/pip install tensorflow==1.0.1
RUN /opt/venv/bin/pip install keras==2.0.3
RUN /opt/venv/bin/pip install h5py
RUN /opt/venv/bin/pip install nltk
RUN /opt/venv/bin/python -m nltk.downloader punkt wordnet
RUN /opt/venv/bin/pip install flask gunicorn

# expose port
EXPOSE 80

RUN pip install supervisor-stdout

# Add our config files
ADD ./supervisord.conf /etc/supervisord.conf
ADD ./nginx.conf /etc/nginx/nginx.conf

# Copy our service code
ADD ./service /opt/app

# stop the default nginx service (supervisord starts nginx with our config)
RUN service nginx stop

# start supervisor to run our wsgi server
CMD supervisord -c /etc/supervisord.conf -n









By clicking "Post Your Answer", you acknowledge that you have read our updated terms of service, privacy policy and cookie policy, and that your continued use of the website is subject to these policies.

V0QDx0I,zcAuibRjAi Co2PDcYY
O8ZNVFAfntD8nD,kangebcsa

Popular posts from this blog

Makefile test if variable is not empty

Visual Studio Code: How to configure includePath for better IntelliSense results

Will Oldham