Commit 0c62933e authored by Jean-Didier Totow

performance model, persistent storage

parent 766ef22d
[uwsgi]
# working directory before the app is loaded
chdir = /app
# chdir again after the app has been loaded
chdir2 = /app
master = true
enable-threads = true
# WSGI entry point: the callable "app" in service.py
module = service
uid = www-data
gid = www-data
callable = app
buffer-size = 65535
lazy = true
chmod-socket = 666
socket = /tmp/uwsgi.sock
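
The config above loads `module = service` and serves the callable `app` over a Unix socket. For orientation only, here is a minimal sketch of what such a `service` module could look like, assuming a Flask app exposing the REST endpoints exercised by the test scripts later in this commit; the module name, handler bodies, and response fields are placeholders, not the commit's actual implementation:

# service_sketch.py -- hypothetical illustration, not the module shipped by this commit
from flask import Flask, request, jsonify

app = Flask(__name__)  # matches "callable = app" in the uWSGI config above

@app.route('/api/v1/model/train', methods=['POST'])
def train_model():
    params = request.get_json()
    # placeholder: a real handler would train a model for params['application']
    # on params['target'] using params['features'] and persist it
    return jsonify({'status': 'accepted'})

@app.route('/api/v1/model/predict', methods=['POST'])
def predict():
    params = request.get_json()
    # placeholder: a real handler would load the persisted model and predict
    return jsonify({'prediction': None})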
from __future__ import print_function
import logging

import grpc

import service_pb2 as pb2
import service_pb2_grpc as gpd2

features = ['cpu_usage', 'memory', 'level', 'response_time', 'latency']
proto_list = pb2.ListOfStrings()
proto_list.strings.extend(features)


def run():
    # NOTE(gRPC Python Team): .close() is possible on a channel and should be
    # used in circumstances in which the with statement does not fit the needs
    # of the code.
    response = None
    with grpc.insecure_channel('localhost:8767') as channel:
        stub = gpd2.PredictStub(channel)
        # ask the service to train a model for the "demo" application that
        # predicts response_time from the feature list above
        response = stub.trainModel(pb2.TrainRequest(application='demo',
                                                    target='response_time',
                                                    url_file="",
                                                    features=proto_list))
    print(response)


if __name__ == '__main__':
    logging.basicConfig()
    run()
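
As the NOTE in the snippet points out, a channel can also be managed explicitly with .close() when a with block does not fit. A minimal sketch of that variant, reusing the names defined above:

def run_with_explicit_close():
    # same call as run(), but with an explicitly managed channel
    channel = grpc.insecure_channel('localhost:8767')
    try:
        stub = gpd2.PredictStub(channel)
        response = stub.trainModel(pb2.TrainRequest(application='demo',
                                                    target='response_time',
                                                    url_file="",
                                                    features=proto_list))
        print(response)
    finally:
        channel.close()  # release the underlying connection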
from __future__ import print_function
import logging
import random
import time

import grpc

import service_pb2 as pb2
import service_pb2_grpc as gpd2

#proto_list = pb2.ListOfStrings()
#proto_list.strings.extend(features)

metrics = ['response_time', 'cpu_usage', 'memory', 'latency']
global_cpu_usage, global_latency, global_memory = None, None, None


def generateMeasurement(name):
    """Generate a synthetic measurement; cpu_usage, latency and memory are
    derived from the most recent response_time so the metrics stay correlated."""
    global global_cpu_usage, global_latency, global_memory
    if name == "response_time":
        _value = random.randint(100, 400)
        global_cpu_usage = 1000 / _value + random.randint(1, 10)
        global_latency = _value * 0.29
        global_memory = 3000 / _value + random.randint(1, 10)
        return _value
    elif name == 'cpu_usage':
        return global_cpu_usage
    elif name == 'latency':
        return global_latency
    else:
        return global_memory


def makeProtoPair(key, value):
    pair = pb2.Pair()
    pair.key = key
    pair.value = value
    return pair


def run():
    # NOTE(gRPC Python Team): .close() is possible on a channel and should be
    # used in circumstances in which the with statement does not fit the needs
    # of the code.
    response = None
    features = {'cpu_usage': 31, "memory": 230, 'latency': 2.1, 'level': 1}
    with grpc.insecure_channel('localhost:8767') as channel:
        stub = gpd2.PredictStub(channel)
        while True:
            features_proto = pb2.Dictionary()
            features_proto.fields['cpu_usage'].float_value = features['cpu_usage']
            features_proto.fields['memory'].float_value = features['memory']
            features_proto.fields['latency'].float_value = features['latency']
            features_proto.fields['level'].float_value = features['level']
            print(features)
            print("--------------------")
            response = stub.PredictPerformance(pb2.PredictRequest(application='demo',
                                                                  target='response_time',
                                                                  features=features_proto))
            print(response)
            time.sleep(5)
            # refresh the synthetic measurements; response_time is generated
            # first so the derived metrics reflect the new value
            for m in metrics:
                if m == "response_time":
                    generateMeasurement(m)
                    continue
                features[m] = generateMeasurement(m)


if __name__ == '__main__':
    logging.basicConfig()
    run()
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc

import service_pb2 as service__pb2


class PredictStub(object):
  # missing associated documentation comment in .proto file
  pass

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    self.PredictPerformance = channel.unary_unary(
        '/proto.Predict/PredictPerformance',
        request_serializer=service__pb2.PredictRequest.SerializeToString,
        response_deserializer=service__pb2.PredictReply.FromString,
        )
    self.getModel = channel.unary_unary(
        '/proto.Predict/getModel',
        request_serializer=service__pb2.ModelRequest.SerializeToString,
        response_deserializer=service__pb2.ModelReply.FromString,
        )
    self.trainModel = channel.unary_unary(
        '/proto.Predict/trainModel',
        request_serializer=service__pb2.TrainRequest.SerializeToString,
        response_deserializer=service__pb2.TrainReply.FromString,
        )


class PredictServicer(object):
  # missing associated documentation comment in .proto file
  pass

  def PredictPerformance(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def getModel(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def trainModel(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')


def add_PredictServicer_to_server(servicer, server):
  rpc_method_handlers = {
      'PredictPerformance': grpc.unary_unary_rpc_method_handler(
          servicer.PredictPerformance,
          request_deserializer=service__pb2.PredictRequest.FromString,
          response_serializer=service__pb2.PredictReply.SerializeToString,
      ),
      'getModel': grpc.unary_unary_rpc_method_handler(
          servicer.getModel,
          request_deserializer=service__pb2.ModelRequest.FromString,
          response_serializer=service__pb2.ModelReply.SerializeToString,
      ),
      'trainModel': grpc.unary_unary_rpc_method_handler(
          servicer.trainModel,
          request_deserializer=service__pb2.TrainRequest.FromString,
          response_serializer=service__pb2.TrainReply.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'proto.Predict', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
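
The generated module only provides the stub, the PredictServicer base class, and the registration helper; the commit's actual server lives elsewhere in the repository. As a rough sketch of how add_PredictServicer_to_server is typically wired up, assuming port 8767 (the address the example clients dial) and default-constructed replies, since the fields of PredictReply and TrainReply are not visible here:

# server_sketch.py -- hypothetical wiring, not the service shipped by this commit
from concurrent import futures

import grpc

import service_pb2 as pb2
import service_pb2_grpc as gpd2


class PredictServicerImpl(gpd2.PredictServicer):
    # getModel is left to the base class and so replies UNIMPLEMENTED

    def PredictPerformance(self, request, context):
        # placeholder: evaluate the persisted model on request.features
        return pb2.PredictReply()

    def trainModel(self, request, context):
        # placeholder: train and persist a model for request.application
        return pb2.TrainReply()


def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    gpd2.add_PredictServicer_to_server(PredictServicerImpl(), server)
    server.add_insecure_port('[::]:8767')  # the port the example clients use
    server.start()
    server.wait_for_termination()


if __name__ == '__main__':
    serve()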
import requests, json, time

url = "http://localhost:8766"

# train model request
url_file = '/home/jean-didier/Projects/morphemic/performance-model/ml_code/example/dataset.csv'
url_file_3 = '/home/jean-didier/Projects/morphemic/performance-model/ml_code/example/all.csv'
#features = ['time','served_request','request_rate','response_time','performance','cpu_usage','memory']
#features_3 = ['number','served_request','request_rate','number_instances','response_time','performance','cpu_usage','cpu_alloc','memory','memory_alloc']
features_2 = ['cpu_usage', 'memory', 'level', 'response_time', 'latency']
#post_data = {'url_file': url_file, 'application': 'application-1','target':'performance','features': features}
post_data_2 = {'url_file': "", 'application': 'fcr', 'target': 'response_time', 'features': features_2}
#post_data_3 = {'url_file': url_file_3, 'application': 'application-3','target':'performance','features': features_3}

#print("Get model")
#response = requests.post(url+"/api/v1/model", data='{"application":"application-2"}', headers={'Content-Type':'application/json'})
#print(response.text)

#response = requests.post(url+"/api/v1/model/train", data=json.dumps(post_data_2), headers={'Content-Type':'application/json'}).text
#print("Training phase")
#print(response)
#time.sleep(5)

# prediction request
#features = {'cpu_alloc': 1 ,'memory_alloc': 64,'number_instances':4, "memory": 51086677.3333}
features = {'cpu_usage': 31, "memory": 4500.23, 'latency': 2.1, 'level': 1}
post_data = {'application': 'fcr', 'target': 'response_time', 'features': features}
response = requests.post(url + "/api/v1/model/predict", data=json.dumps(post_data), headers={'Content-Type': 'application/json'}).text
print(response)
import requests, json, time

url = "http://localhost:8766"

# train model request
url_file = '/home/jean-didier/Projects/morphemic/performance-model/ml_code/example/dataset.csv'
#url_file_3 = '/home/jean-didier/Projects/morphemic/performance-model/ml_code/example/all.csv'
features = ['time', 'served_request', 'request_rate', 'response_time', 'performance', 'cpu_usage', 'memory']
#features_3 = ['number','served_request','request_rate','number_instances','response_time','performance','cpu_usage','cpu_alloc','memory','memory_alloc']
#features_2 = ['time','cpu_usage','memory']
post_data = {'url_file': url_file, 'application': 'application-1', 'target': 'performance', 'features': features}
#post_data_2 = {'url_file': "", 'application': 'application-2','target':'response_time','features': features_2}
#post_data_3 = {'url_file': url_file_3, 'application': 'application-3','target':'performance','features': features_3}

#print("Get model")
#response = requests.post(url+"/api/v1/model", data='{"application":"application-1"}', headers={'Content-Type':'application/json'})
#print(response.text)

#response = requests.post(url+"/api/v1/model/train", data=json.dumps(post_data), headers={'Content-Type':'application/json'}).text
#print("Training phase")
#print(response)
#time.sleep(5)

# prediction request
#'time','served_request','request_rate','response_time','performance','cpu_usage','memory'
features = {'served_request': 267, 'request_rate': 60, 'time': 1602538627.766, 'response_time': 2, 'cpu_usage': 31, "memory": 51086677.3333}
post_data = {'application': 'application-1', 'target': 'performance', 'features': features}
response = requests.post(url + "/api/v1/model/predict", data=json.dumps(post_data), headers={'Content-Type': 'application/json'}).text
print(response)