Commit 4208d70

fix bug and upload time series
1 parent 0e417f9 commit 4208d70

35 files changed: 102 additions, 1 deletion

Lines changed: 102 additions & 1 deletion
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
from torch.autograd import Variable
import numpy as np

np.random.seed(2017)
# load data (the bug this commit fixes: previously read from an empty path)
data_csv = pd.read_csv('./data.csv', usecols=[1])

# data preprocessing: drop missing values and scale by the value range
data_csv = data_csv.dropna()
dataset = data_csv.values
dataset = dataset.astype('float32')
max_value = np.max(dataset)
min_value = np.min(dataset)
scalar = max_value - min_value
dataset = list(map(lambda x: x / scalar, dataset))


# create dataset: sliding windows of `look_back` values and the value that follows each window
def create_dataset(dataset, look_back=2):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        a = dataset[i:(i + look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)


data_X, data_Y = create_dataset(dataset)
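# For example (illustrative), with dataset = [[0.], [1.], [2.], [3.], [4.]] and look_back=2,
# create_dataset returns
#   dataX with shape (3, 2, 1): windows [0, 1], [1, 2], [2, 3]
#   dataY with shape (3, 1):    targets 2, 3, 4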

# split train set and test set (70% / 30%)
train_size = int(len(data_X) * 0.7)
test_size = len(data_X) - train_size
train_X = data_X[:train_size]
train_Y = data_Y[:train_size]
test_X = data_X[train_size:]
test_Y = data_Y[train_size:]

# reshape to (seq, batch, feature), the layout nn.LSTM expects
train_X = train_X.reshape(-1, 1, 2)
train_Y = train_Y.reshape(-1, 1, 1)
test_X = test_X.reshape(-1, 1, 2)

train_x = torch.from_numpy(train_X)
train_y = torch.from_numpy(train_Y)
test_x = torch.from_numpy(test_X)


# define network
class lstm(nn.Module):
    def __init__(self, input_size=2, hidden_size=4, output_size=1, num_layer=2):
        super(lstm, self).__init__()
        self.layer1 = nn.LSTM(input_size, hidden_size, num_layer)
        self.layer2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x, _ = self.layer1(x)  # (seq, batch, hidden)
        s, b, h = x.size()
        x = x.view(s * b, h)   # flatten so the linear layer is applied to every time step
        x = self.layer2(x)
        x = x.view(s, b, -1)   # back to (seq, batch, output)
        return x
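# shape check (illustrative): an input of shape (seq_len, 1, 2), e.g. torch.randn(5, 1, 2),
# comes out of lstm() with shape (5, 1, 1)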


model = lstm()
if torch.cuda.is_available():
    model = model.cuda()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-2)

# train
total_epoch = 1000
for epoch in range(total_epoch):
    if torch.cuda.is_available():
        train_x = train_x.cuda()
        train_y = train_y.cuda()
    var_x = Variable(train_x)
    var_y = Variable(train_y)
    out = model(var_x)
    loss = criterion(out, var_y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 100 == 0:
        # on PyTorch >= 0.4, use loss.item() instead of loss.data[0]
        print('epoch {}, loss is {}'.format(epoch + 1, loss.data[0]))

torch.save(model.state_dict(), './lstm.pth')
model = model.eval()

# predict over the whole series and compare with the original data
data_X = data_X.reshape(-1, 1, 2)
data_X = torch.from_numpy(data_X)
if torch.cuda.is_available():
    data_X = data_X.cuda()  # keep the input on the same device as the model
var_data = Variable(data_X)
predict = model(var_data)

predict = predict.cpu().data.numpy()

predict = predict.reshape(-1)

plt.plot(predict, 'r')
plt.plot(dataset, 'b')
plt.show()
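
A minimal sketch of reusing the saved weights later (not part of this commit; it assumes the lstm class above is in scope, and new_windows is a hypothetical float32 array shaped (seq, 1, 2) built the same way as the training windows):

model = lstm()
model.load_state_dict(torch.load('./lstm.pth'))
model = model.eval()
new_x = torch.from_numpy(new_windows)  # hypothetical input, shape (seq, 1, 2)
prediction = model(Variable(new_x)).cpu().data.numpy().reshape(-1)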
File renamed without changes.
