Commit 3c3fb4fd authored by Anna Warno
Browse files

timestamp corrected

parent a7d58dd7
......@@ -106,7 +106,7 @@ def main():
for i in range(number_of_forward_predictions[metric]):
prediction_msgs, prediction = predict(
metric,
prediction_cycle,
prediction_cycle // msg["publish_rate"],
extra_data=predictions,
m=i + 1,
prediction_hor=prediction_horizon,
......
......@@ -10,7 +10,7 @@ from datetime import timedelta
from src.dataset_maker import CSVData
TOPIC_NAME = "training_models"
RETRAIN_CYCLE = 10 # minutes
RETRAIN_CYCLE = 2 # minutes
HOSTS = (os.environ.get("AMQ_HOSTNAME", "localhost"), os.environ.get("AMQ_PORT", 61613))
AMQ_USER = os.environ.get("AMQ_USER", "admin")
AMQ_PASSWORD = os.environ.get("AMQ_PASSWORD", "admin")
......
......@@ -12,9 +12,19 @@ import logging
"""Script for nbeats fusion transformer prediction"""
def predict(target_column, yaml_file="nbeats.yaml", extra_data=None):
def predict(
target_column,
prediction_length,
yaml_file="model.yaml",
extra_data=None,
m=1,
prediction_hor=60,
timestamp=0,
):
with open(yaml_file) as file:
params = yaml.load(file, Loader=yaml.FullLoader)
params["dataset"]["prediction_length"] = prediction_length
params["dataset"]["context_length"] = prediction_length * 10
data_path = os.path.join(
os.environ.get("DATA_PATH", "./"), f'{os.environ.get("APP_NAME", "demo")}.csv'
......@@ -66,24 +76,23 @@ def predict(target_column, yaml_file="nbeats.yaml", extra_data=None):
prediction_input = prediction_input.to_dataloader(train=False)
prediction = model.predict(prediction_input, mode="raw")["prediction"]
print(prediction)
msg = {
target_column: {
"metricValues": prediction[-1][-1].item(),
"level": 0,
"timestamp": time.time(),
"metricValues": prediction[-1][-1][2].item(),
"level": "TODO",
"timestamp": timestamp,
"probability": 0.95,
"confidence_interval": "TODO", # quantiles difference
"horizon": 20 * 60, # TODO
"confidence_interval": abs(
(prediction[-1][-1][-1] - prediction[-1][-1][0]).item()
), # quantiles difference
"horizon": prediction_hor * m,
"refersTo": "TODO",
"cloud": "TODO",
"provider": "TODO",
}
}
print(f"prediction msg: {msg}")
logging.debug(f"prediction msg: {msg}")
# return predicted values
future_df["split"] = "val"
future_df[target_column] = prediction[-1]
future_df[target_column] = prediction.permute(0, 2, 1)[0][2]
return (msg, future_df)
......@@ -21,11 +21,13 @@ LOSSES_DICT = {
}
def train(target_column, yaml_file="nbeats.yaml"):
def train(target_column, prediction_length, yaml_file="model.yaml"):
torch.manual_seed(12345)
with open(yaml_file) as file:
params = yaml.load(file, Loader=yaml.FullLoader)
params["dataset"]["prediction_length"] = prediction_length
params["dataset"]["context_length"] = prediction_length * 10
data_path = os.path.join(
os.environ.get("DATA_PATH", "./"), f'{os.environ.get("APP_NAME", "demo")}.csv'
......
......@@ -89,14 +89,14 @@ def main():
)
# msg1 = Msg()
# msg1.body = '[{"metric": "cpu_usage", "level": 3, "publish_rate": 60}]'
# msg1.body = '[{"metric": "cpu_usage", "level": 3, "publish_rate": 6000}]'
# msg2 = Msg()
# msg2.body = """{
# "metrics": ["cpu_usage"],
# "timestamp": 0,
# "epoch_start": 0,
# "number_of_forward_predictions": 5,
# "prediction_horizon": 120}"""
# "number_of_forward_predictions": 8,
# "prediction_horizon": 12000}"""
# StartListener(start_conn.conn, START_APP_TOPIC).on_message(msg1)
# StartForecastingListener(start_conn.conn, START_APP_TOPIC).on_message(msg2)
......
......@@ -106,7 +106,7 @@ def main():
for i in range(number_of_forward_predictions[metric]):
prediction_msgs, prediction = predict(
metric,
prediction_cycle,
prediction_cycle // msg["publish_rate"],
extra_data=predictions,
m=i + 1,
prediction_hor=prediction_horizon,
......
......@@ -26,6 +26,7 @@ def predict(
with open(yaml_file) as file:
params = yaml.load(file, Loader=yaml.FullLoader)
params["dataset"]["prediction_length"] = prediction_length
params["dataset"]["context_length"] = prediction_length * 10
data_path = os.path.join(
os.environ.get("DATA_PATH", "./"), f'{os.environ.get("APP_NAME", "demo")}.csv'
......
......@@ -28,6 +28,7 @@ def train(target_column, prediction_length, yaml_file="model.yaml"):
with open(yaml_file) as file:
params = yaml.load(file, Loader=yaml.FullLoader)
params["dataset"]["prediction_length"] = prediction_length
params["dataset"]["context_length"] = prediction_length * 10
data_path = os.path.join(
os.environ.get("DATA_PATH", "./"), f'{os.environ.get("APP_NAME", "demo")}.csv'
......
Markdown is supported
Attach a file by drag &amp; drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment