Melodic / alpha-star-solver · Commits

Commit 2d957a89
Authored Jan 25, 2021 by szymon sadkowski
Parent: 2c213d57

    removed private file

Showing 1 changed file with 0 additions and 116 deletions.

FCRtraining/networks/lstm_network1.py (deleted, mode 100644 → 0; file contents as of parent 2c213d57):
from FCRdataLoader.fcrdataloader.dataset import FCRdatasetFactory
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from .LitFCRtestBase import BaseTestEncoder
import torch.nn.functional as F
import torch.nn as nn
import torch
'''
Don't touch, great performance
'''
HIDDEN_SIZE = 30
BATCH_SIZE = 128
SEQ_LEN = 20
HORIZON = 5
LSTM_LAYERS = 3
LR = 0.1
FEATURES = 3
OUTPUT = 6
class Encoder(BaseTestEncoder):
    def __init__(
        self,
        features=FEATURES,
        output=OUTPUT,
        lr=LR,
        batch_size=BATCH_SIZE,
        seq_len=SEQ_LEN,
        horizon=HORIZON,
        hidden_size=HIDDEN_SIZE,
        lstm_layers=LSTM_LAYERS,
    ):
        super(Encoder, self).__init__()

        self.seq_len = seq_len
        self.horizon = horizon
        self.batch_size = batch_size
        # was `self.lstm_layers = LSTM_LAYERS`, which silently ignored the
        # constructor argument; use the parameter instead
        self.lstm_layers = lstm_layers
        self.criterion = nn.MSELoss()
        self.lr = lr

        self.lstm = nn.LSTM(
            features,
            hidden_size,
            num_layers=self.lstm_layers,
            bidirectional=True,
            batch_first=True,
        )
        self.fc1 = nn.Linear(hidden_size * 2, output)

        # data transformation
        self.data_set_factory = FCRdatasetFactory(SEQ_LEN, HORIZON)
    def forward(self, x):
        out, _ = self.lstm(x)
        # out: (batch, seq_len, hidden_size * directions)
        out = out[:, -1, :]  # keep the last time step only
        # out: (batch, hidden_size * directions)
        out = self.fc1(out)
        return out
    def training_step(self, batch, batch_idx):
        x, y = batch
        prediction = self(x)
        # print(f"x = {x[0]}")
        # print(f"pred = {torch.round(prediction[0])}")
        # print(f"y = {y[0]}")
        loss = self.criterion(prediction, y)
        self.log('train_loss', loss, on_step=False, on_epoch=True)
        return loss
    def val_dataloader(self):
        return self.test_dataloader()
    def train_dataloader(self):
        return DataLoader(
            self.data_set_factory.get_train_dataset(),
            batch_size=self.batch_size,
            num_workers=4,
            sampler=self.data_set_factory.get_uniform_dist_y_sampler(),
        )
    def test_dataloader(self):
        loader = DataLoader(
            self.data_set_factory.get_test_dataset(),
            batch_size=self.batch_size,
            num_workers=4,
        )
        return loader
    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
        scheduler = ReduceLROnPlateau(optimizer, 'min', patience=20, verbose=True)
        return {
            'optimizer': optimizer,
            'lr_scheduler': scheduler,
            'monitor': 'train_loss',
        }
    def __get_scaler(self, train_dataset):
        # private helper; not referenced elsewhere in this file
        scaler_loader = DataLoader(train_dataset, batch_size=len(train_dataset))
        scaler = MinMaxScaler(feature_range=(-10, 10))
        # flatten to (samples, 3); fixed for fcr data (3 == FEATURES)
        batch = next(iter(scaler_loader))[0].reshape(-1, 3)
        scaler.fit(batch)
        return scaler
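
For context, a minimal sketch of how this module could have been trained, assuming BaseTestEncoder subclasses pytorch_lightning.LightningModule (implied by the training_step / self.log / configure_optimizers hooks above); the Trainer settings here are illustrative, not taken from the repository.

import pytorch_lightning as pl
from FCRtraining.networks.lstm_network1 import Encoder  # path as shown in this diff

# Defaults from the module: 3 input features, sequence length 20,
# horizon 5, 6 outputs.
model = Encoder()

# Hypothetical trainer configuration; the module supplies its own
# train/val dataloaders, so none need to be passed to fit().
trainer = pl.Trainer(max_epochs=50)
trainer.fit(model)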