Commit 3ad9c26d authored by Anna Warno

arima error package corrected

parent f52e2733
Pipeline #20427 passed in 3 minutes and 8 seconds
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
,awarno,bulls-ThinkPad-T480,28.09.2021 09:11,file:///home/awarno/.config/libreoffice/4;
\ No newline at end of file
@@ -2,9 +2,13 @@ FROM python:3.8-slim-buster
# Install Python dependencies.
WORKDIR /wd
# COPY deployment/arima/requirements.txt .
COPY deployment/arima/requirements.txt .
RUN pip3 install --no-cache-dir -r requirements.txt && mkdir models
COPY deployment/arima/poetry.lock deployment/arima/pyproject.toml /wd/
RUN pip install --no-cache-dir --upgrade pip \
    && pip install --no-cache-dir poetry \
    && poetry install --no-dev && mkdir models
ADD https://gitlab.ow2.org/melodic/morphemic-preprocessor/-/archive/morphemic-rc1.5/morphemic-preprocessor-morphemic-rc1.5.tar.gz /var/lib/morphemic/
@@ -19,6 +23,6 @@ RUN cd /var/lib/morphemic/ \
&& cp -R /var/lib/morphemic/morphemic-preprocessor-morphemic-rc1.5/amq-message-python-library /wd/amq_message_python_library \
&& rm -rf /var/lib/morphemic
CMD ["python3", "main.py"]
CMD ["poetry", "run" ,"python3", "main.py"]
from morphemic.dataset import DatasetMaker
"""Script for preparing csv data downloaded form InfluxDB database, data"""
import os
from filelock import FileLock
"""Script for preparing csv data downloaded form InfluxDB database, data"""
from filelock import FileLock
from typing import Optional, Union
from morphemic.dataset import DatasetMaker
class CSVData(object):
def __init__(self, name, start_collection=None):
def __init__(
self, name: str, start_collection: Optional[Union[str, int]] = None
) -> None:
"""Create csv data downloaded from influx
Args:
------
name (str): dataset name, downloaded dataset will be saved
under : <SELF NAME>.csv
start_collection (Optional[Union[str, int]], optional): start timestamp
if None then the whole dataset is downloaded. Defaults to None.
"""
self.name = name
self.config = {
"hostname": os.environ.get("INFLUXDB_HOSTNAME", "localhost"),
@@ -18,7 +31,8 @@ class CSVData(object):
}
self.start_collection = start_collection
def prepare_csv(self):
def prepare_csv(self) -> None:
"""Download latest data"""
lockfile = os.path.join(self.config["path_dataset"], f"{self.name}.csv")
lock = FileLock(lockfile + ".lock")
......
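For readers unfamiliar with the pattern above, a minimal usage sketch of CSVData together with the filelock guard; the dataset name, the DATASET_PATH variable, and the body of the with-block are assumptions for illustration, not taken from the diff.

import os

from filelock import FileLock

from arima.dataset_maker import CSVData

# Hypothetical usage: download the full history for a dataset called "demo".
data = CSVData(name="demo", start_collection=None)
data.prepare_csv()  # writes demo.csv under the configured dataset path

# The locking pattern from the diff, made explicit: a sidecar .lock file
# guards concurrent readers/writers of the same CSV (DATASET_PATH is made up).
csv_path = os.path.join(os.environ.get("DATASET_PATH", "./data"), "demo.csv")
with FileLock(csv_path + ".lock"):
    pass  # read or rewrite demo.csv safely here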
from influxdb import *
import os
import datetime
import os
from influxdb import *
"""Connects with Influxdb and sends predictet values"""
......
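As a rough sketch of what "sends predicted values" involves with the influxdb client imported above; connection parameters, database, measurement, and field names are all placeholders, not the project's configuration.

from influxdb import InfluxDBClient

# Placeholder connection values; the service reads the real ones from
# environment variables such as INFLUXDB_HOSTNAME shown earlier in the diff.
client = InfluxDBClient(host="localhost", port=8086,
                        username="admin", password="admin",
                        database="predictions")

# One predicted point; measurement and field names are illustrative only.
point = {
    "measurement": "arima_prediction",
    "fields": {"value": 42.0},
}
client.write_points([point])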
import yaml
import pandas as pd
import numpy as np
import time
import logging
import os
import statsmodels.api as sm
from filelock import FileLock
from src.preprocess_dataset import Dataset
import time
import logging
import pytz
from datetime import datetime
import numpy as np
import pandas as pd
import pytz
import statsmodels.api as sm
import yaml
from filelock import FileLock
from arima.preprocess_dataset import Dataset
pd.options.mode.chained_assignment = None
"""Script for temporal fusion transformer prediction"""
......
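The prediction module relies on statsmodels; for orientation, a minimal self-contained ARIMA forecasting sketch. This is not the project's predict() implementation, and the toy series, model order, and horizon are made up.

import numpy as np
import pandas as pd
from statsmodels.tsa.arima.model import ARIMA

# Toy series standing in for the metric pulled from InfluxDB.
series = pd.Series(np.sin(np.linspace(0, 20, 200)))

# Fit a small ARIMA model and produce a multi-step forecast.
fitted = ARIMA(series, order=(2, 0, 1)).fit()
forecast = fitted.forecast(steps=10)  # next 10 points
print(forecast)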
import time
import os
import time
def train(target_column, prediction_length, yaml_file="model.yaml"):
......
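A hedged example of how the train entry point above might be invoked; the column name and horizon are placeholders, and model.yaml is assumed to follow the layout the service already ships.

from arima.model_train import train

# Hypothetical call: "cpu_usage" and the 12-step horizon are placeholders.
train(target_column="cpu_usage", prediction_length=12, yaml_file="model.yaml")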
import pandas as pd
import numpy as np
import logging
import time
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
"""Script for preparing time series dataset from pythorch-forecasting package
......
import os
import stomp
import json
from amq_message_python_library import * # python amq-message-python-library
import logging
import os
import time
from datetime import datetime
from pytz import timezone
import time
import setproctitle
# from src.log import logger
import setproctitle
import stomp
from amq_message_python_library import * # python amq-message-python-library
from pytz import timezone
AMQ_USER = os.environ.get("AMQ_USER", "admin")
AMQ_PASSWORD = os.environ.get("AMQ_PASSWORD", "admin")
......
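The listener reads the AMQ credentials above and talks to ActiveMQ through stomp.py (wrapped by amq_message_python_library in this repo); below is a bare stomp.py sketch of that pattern, with host, port, and topic name assumed rather than taken from the project.

import os

import stomp

class PrintingListener(stomp.ConnectionListener):
    def on_message(self, frame):
        # The real service parses the forecasting payload here.
        print("received:", frame.body)

# Host and port are illustrative; the project resolves them from its own env vars.
conn = stomp.Connection([("localhost", 61613)])
conn.set_listener("", PrintingListener())
conn.connect(os.environ.get("AMQ_USER", "admin"),
             os.environ.get("AMQ_PASSWORD", "admin"),
             wait=True)
conn.subscribe(destination="/topic/start_forecasting.arima", id=1, ack="auto")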
This diff is collapsed.
import time
import os
import stomp
import threading
from src.model_predict import predict
from amq_message_python_library import *
from src.influxdb_predictions import InfluxdbPredictionsSender
import json
import sys
import pandas as pd
import logging
import os
import sys
import threading
import time
from datetime import datetime
from src.dataset_maker import CSVData
from pytz import timezone
import pytz
from datetime import datetime
import random
import stomp
from amq_message_python_library import *
from pytz import timezone
from arima.dataset_maker import CSVData
from arima.influxdb_predictions import InfluxdbPredictionsSender
from arima.model_predict import predict
METHOD = os.environ.get("METHOD", "nbeats")
START_TOPIC = f"start_forecasting.{METHOD}"
......
[tool.poetry]
name = "arima"
version = "0.1.0"
description = ""
authors = ["Anna Warno <awarno@7bulls.com>"]
[tool.poetry.dependencies]
python = "^3.8"
"stomp.py" = "^8.0.0"
pandas = "1.1.3"
statsmodels = "0.13.2"
filelock = "3.0.12"
PyYAML = "^6.0"
influxdb = "^5.3.1"
python-slugify = "^6.1.1"
setproctitle = "^1.2.2"
[tool.poetry.dev-dependencies]
pytest = "^5.2"
isort = "^5.10.1"
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
stomp.py
pandas==1.1.3
statsmodels==0.12.2
filelock==3.0.12
pyyaml
influxdb
python-slugify
setproctitle
import os
import sys
import json
import logging
import time
from src.model_train import train
from amq_message_python_library import *
from src.dataset_maker import CSVData
import pytz
import os
import sys
import time
from datetime import datetime
import pytz
from amq_message_python_library import *
from arima.dataset_maker import CSVData
from arima.model_train import train
TOPIC_NAME = "training_models"
RETRAIN_CYCLE = 10 # minutes
AMQ_USER = os.environ.get("AMQ_USER", "admin")
......
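RETRAIN_CYCLE above is expressed in minutes; a hedged sketch of the periodic retraining idea it suggests. The real retrain.py logic (message handling, dataset refresh) is not reproduced here.

import time

RETRAIN_CYCLE = 10  # minutes, as declared in the diff above

def retrain_periodically(do_retrain):
    """Call do_retrain() every RETRAIN_CYCLE minutes (illustration only)."""
    while True:
        do_retrain()
        time.sleep(RETRAIN_CYCLE * 60)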
@@ -2,12 +2,13 @@ import sys
sys.path.append(".")
import pytest
from src.model_predict import predict
import pandas as pd
import numpy as np
import random
import numpy as np
import pandas as pd
import pytest
from arima.model_predict import predict
@pytest.fixture
def df_1():
......
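Each test module defines a df_1 fixture whose body is collapsed above; a hypothetical example of such a fixture, with column names and frequency made up.

import numpy as np
import pandas as pd
import pytest

@pytest.fixture
def df_1():
    # Hypothetical time-series frame; the real fixture's columns are not shown.
    rng = pd.date_range("2021-01-01", periods=100, freq="T")
    return pd.DataFrame({"time": rng, "value": np.random.rand(100)})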
@@ -2,12 +2,13 @@ import sys
sys.path.append(".")
import pytest
from src.model_train import train
import pandas as pd
import numpy as np
import random
import numpy as np
import pandas as pd
import pytest
from arima.model_train import train
@pytest.fixture
def df_1():
......
@@ -2,12 +2,13 @@ import sys
sys.path.append(".")
import pytest
from src.preprocess_dataset import Dataset
import pandas as pd
import numpy as np
import random
import numpy as np
import pandas as pd
import pytest
from arima.preprocess_dataset import Dataset
@pytest.fixture
def df_1():
......