diff --git a/README.md b/README.md index 34ef4ab7..e5075916 100644 --- a/README.md +++ b/README.md @@ -7,10 +7,123 @@ Find associated tutorials at https://lazyprogrammer.me Find associated courses at https://deeplearningcourses.com +Please note that not all code from all courses will be found in this repository. Some newer code examples (e.g. most of Tensorflow 2.0) were done in Google Colab. Therefore, you should check the instructions given in the lectures for the course you are taking. -Direct Course Links + +How do I find the code for a particular course? +=============================================== + +The code for each course is separated by folder. You can determine which folder corresponds with which course by watching the "Where to get the code" lecture inside the course (usually Lecture 2 or 3). + +Remember: one folder = one course. + + +Why you should not fork this repo +================================= + +I've noticed that many people have out-of-date forks. Thus, I recommend not forking this repository if you take one of my courses. I am constantly updating my courses, and your fork will soon become out-of-date. You should clone the repository instead to make it easy to get updates (i.e. just "git pull" randomly and frequently). + + +Where is the code for your latest courses? +========================================== + +Beginning with Tensorflow 2, I started to use Google Colab. For those courses, unless otherwise noted, the code will be on Google Colab. Links to the notebooks are provided in the course. See the lecture "Where to get the code" for further details. + + +VIP Course Links =================== +**Advanced AI: Deep Reinforcement Learning in PyTorch (v2)** + +https://deeplearningcourses.com/c/deep-reinforcement-learning-in-pytorch + + +**Data Science: Transformers for Natural Language Processing** + +https://deeplearningcourses.com/c/data-science-transformers-nlp + + +**Machine Learning: Natural Language Processing in Python (V2)** + +https://deeplearningcourses.com/c/natural-language-processing-in-python + + +**Time Series Analysis, Forecasting, and Machine Learning** + +https://deeplearningcourses.com/c/time-series-analysis + + +**Financial Engineering and Artificial Intelligence in Python** + +https://deeplearningcourses.com/c/ai-finance + + +**PyTorch: Deep Learning and Artificial Intelligence** + +https://deeplearningcourses.com/c/pytorch-deep-learning + + +**Tensorflow 2.0: Deep Learning and Artificial Intelligence** (VIP Version) + +https://deeplearningcourses.com/c/deep-learning-tensorflow-2 + + +**Math 0-1: Linear Algebra for Data Science & Machine Learning** + +https://deeplearningcourses.com/c/linear-algebra-data-science + +**Math 0-1: Probability for Data Science & Machine Learning** + +https://deeplearningcourses.com/c/probability-data-science-machine-learning + + +Deep Learning Courses Exclusives +================================ + +Data Science: Bayesian Linear Regression in Python +https://deeplearningcourses.com/c/bayesian-linear-regression-in-python + +Data Science: Bayesian Classification in Python +https://deeplearningcourses.com/c/bayesian-classification-in-python + +Classical Statistical Inference and A/B Testing in Python +https://deeplearningcourses.com/c/statistical-inference-in-python + +Linear Programming for Linear Regression in Python +https://deeplearningcourses.com/c/linear-programming-python + +MATLAB for Students, Engineers, and Professionals in STEM +https://deeplearningcourses.com/c/matlab + + + +Other Course Links
+================== + +Generative AI: ChatGPT & OpenAI LLMs in Python +https://deeplearningcourses.com/c/genai-openai-chatgpt + +Math 0-1: Matrix Calculus for Data Science & Machine Learning +https://deeplearningcourses.com/c/matrix-calculus-machine-learning + +Machine Learning: Modern Computer Vision & Generative AI +https://deeplearningcourses.com/c/computer-vision-kerascv + +DeepFakes & Voice Cloning: Machine Learning The Easy Way +https://deeplearningcourses.com/c/deepfakes-voice-cloning + +Financial Analysis: Build a ChatGPT Pairs Trading Bot +https://deeplearningcourses.com/c/chatgpt-pairs-trading + +Math 0-1: Calculus for Data Science & Machine Learning +https://deeplearningcourses.com/c/calculus-data-science + +Data Science & Machine Learning: Naive Bayes in Python +https://deeplearningcourses.com/c/data-science-machine-learning-naive-bayes-in-python + +Cutting-Edge AI: Deep Reinforcement Learning in Python +https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence + Recommender Systems and Deep Learning in Python https://deeplearningcourses.com/c/recommender-systems @@ -50,7 +163,7 @@ https://deeplearningcourses.com/c/data-science-linear-regression-in-python Deep Learning Prerequisites: Logistic Regression in Python https://deeplearningcourses.com/c/data-science-logistic-regression-in-python -Deep Learning in Python +Data Science: Deep Learning and Neural Networks in Python https://deeplearningcourses.com/c/data-science-deep-learning-in-python Cluster Analysis and Unsupervised Machine Learning in Python @@ -62,10 +175,10 @@ https://deeplearningcourses.com/c/data-science-supervised-machine-learning-in-py Bayesian Machine Learning in Python: A/B Testing https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing -Easy Natural Language Processing in Python +Data Science: Natural Language Processing in Python https://deeplearningcourses.com/c/data-science-natural-language-processing-in-python -Practical Deep Learning in Theano and TensorFlow +Modern Deep Learning in Python https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow Ensemble Machine Learning in Python: Random Forest and AdaBoost diff --git a/ab_testing/bayesian_bandit.py b/ab_testing/bayesian_bandit.py index a930cf2b..61e8f812 100644 --- a/ab_testing/bayesian_bandit.py +++ b/ab_testing/bayesian_bandit.py @@ -12,15 +12,17 @@ from scipy.stats import beta +# np.random.seed(2) NUM_TRIALS = 2000 BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] -class Bandit(object): +class Bandit: def __init__(self, p): self.p = p self.a = 1 self.b = 1 + self.N = 0 # for information only def pull(self): return np.random.random() < self.p @@ -31,14 +33,15 @@ def sample(self): def update(self, x): self.a += x self.b += 1 - x + self.N += 1 def plot(bandits, trial): x = np.linspace(0, 1, 200) for b in bandits: y = beta.pdf(x, b.a, b.b) - plt.plot(x, y, label="real p: %.4f" % b.p) - plt.title("Bandit distributions after %s trials" % trial) + plt.plot(x, y, label=f"real p: {b.p:.4f}, win rate = {b.a - 1}/{b.N}") + plt.title(f"Bandit distributions after {trial} trials") plt.legend() plt.show() @@ -47,27 +50,28 @@ def experiment(): bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] sample_points = [5,10,20,50,100,200,500,1000,1500,1999] + rewards = np.zeros(NUM_TRIALS) for i in range(NUM_TRIALS): + # Thompson sampling + j = np.argmax([b.sample() for b in bandits]) - # take a sample from each bandit - bestb = None - maxsample = -1 - allsamples = [] # let's collect these just to print for debugging - 
for b in bandits: - sample = b.sample() - allsamples.append("%.4f" % sample) - if sample > maxsample: - maxsample = sample - bestb = b + # plot the posteriors if i in sample_points: - print("current samples: %s" % allsamples) plot(bandits, i) # pull the arm for the bandit with the largest sample - x = bestb.pull() + x = bandits[j].pull() + + # update rewards + rewards[i] = x # update the distribution for the bandit whose arm we just pulled - bestb.update(x) + bandits[j].update(x) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) if __name__ == "__main__": diff --git a/ab_testing/bayesian_normal.py b/ab_testing/bayesian_normal.py new file mode 100644 index 00000000..07083d1b --- /dev/null +++ b/ab_testing/bayesian_normal.py @@ -0,0 +1,84 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt +from scipy.stats import norm + + +np.random.seed(1) +NUM_TRIALS = 2000 +BANDIT_MEANS = [1, 2, 3] + + +class Bandit: + def __init__(self, true_mean): + self.true_mean = true_mean + # parameters for mu - prior is N(0,1) + self.m = 0 + self.lambda_ = 1 + self.tau = 1 + self.N = 0 + + def pull(self): + return np.random.randn() / np.sqrt(self.tau) + self.true_mean + + def sample(self): + return np.random.randn() / np.sqrt(self.lambda_) + self.m + + def update(self, x): + self.m = (self.tau * x + self.lambda_ * self.m) / (self.tau + self.lambda_) + self.lambda_ += self.tau + self.N += 1 + + +def plot(bandits, trial): + x = np.linspace(-3, 6, 200) + for b in bandits: + y = norm.pdf(x, b.m, np.sqrt(1. 
/ b.lambda_)) + plt.plot(x, y, label=f"real mean: {b.true_mean:.4f}, num plays: {b.N}") + plt.title(f"Bandit distributions after {trial} trials") + plt.legend() + plt.show() + + +def run_experiment(): + bandits = [Bandit(m) for m in BANDIT_MEANS] + + sample_points = [5,10,20,50,100,200,500,1000,1500,1999] + rewards = np.empty(NUM_TRIALS) + for i in range(NUM_TRIALS): + # Thompson sampling + j = np.argmax([b.sample() for b in bandits]) + + # plot the posteriors + if i in sample_points: + plot(bandits, i) + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + # update rewards + rewards[i] = x + + cumulative_average = np.cumsum(rewards) / (np.arange(NUM_TRIALS) + 1) + + # plot moving average ctr + plt.plot(cumulative_average) + for m in BANDIT_MEANS: + plt.plot(np.ones(NUM_TRIALS)*m) + plt.show() + + return cumulative_average + +if __name__ == '__main__': + run_experiment() + + diff --git a/ab_testing/bayesian_starter.py b/ab_testing/bayesian_starter.py new file mode 100644 index 00000000..68e12f75 --- /dev/null +++ b/ab_testing/bayesian_starter.py @@ -0,0 +1,78 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np +from scipy.stats import beta + + +# np.random.seed(2) +NUM_TRIALS = 2000 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + self.p = p + self.a = # TODO + self.b = # TODO + self.N = 0 # for information only + + def pull(self): + return np.random.random() < self.p + + def sample(self): + return # TODO - draw a sample from Beta(a, b) + + def update(self, x): + self.a = # TODO + self.b = # TODO + self.N += 1 + + +def plot(bandits, trial): + x = np.linspace(0, 1, 200) + for b in bandits: + y = beta.pdf(x, b.a, b.b) + plt.plot(x, y, label=f"real p: {b.p:.4f}, win rate = {b.a - 1}/{b.N}") + plt.title(f"Bandit distributions after {trial} trials") + plt.legend() + plt.show() + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + sample_points = [5,10,20,50,100,200,500,1000,1500,1999] + rewards = np.zeros(NUM_TRIALS) + for i in range(NUM_TRIALS): + # Thompson sampling + j = # TODO + + # plot the posteriors + if i in sample_points: + plot(bandits, i) + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + +if __name__ == "__main__": + experiment() diff --git a/ab_testing/cdfs_and_percentiles.py b/ab_testing/cdfs_and_percentiles.py new file mode 100644 index 00000000..124bc408 --- /dev/null +++ b/ab_testing/cdfs_and_percentiles.py @@ -0,0 +1,35 @@ +import numpy as np +import matplotlib.pyplot as plt +from scipy.stats import norm + + +mu = 170 +sd = 7 + + +# generate samples from our distribution +x = norm.rvs(loc=mu, scale=sd, size=100) + +# maximum likelihood mean +x.mean() 
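# Illustrative aside (not part of the repository's script): the "maximum likelihood"
# variance below divides by N, while the "unbiased" estimate (ddof=1) divides by N - 1.
# A small self-contained check of that distinction, assuming the same numpy/scipy setup:
import numpy as np
from scipy.stats import norm

x_demo = norm.rvs(loc=170, scale=7, size=100)
ml_var = np.sum((x_demo - x_demo.mean())**2) / len(x_demo)               # divide by N
unbiased_var = np.sum((x_demo - x_demo.mean())**2) / (len(x_demo) - 1)   # divide by N - 1
assert np.isclose(ml_var, x_demo.var())              # matches the default ddof=0
assert np.isclose(unbiased_var, x_demo.var(ddof=1))  # matches ddof=1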
+ +# maximum likelihood variance +x.var() + +# maximum likelihood std +x.std() + +# unbiased variance +x.var(ddof=1) + +# unbiased std +x.std(ddof=1) + +# at what height are you in the 95th percentile? +norm.ppf(0.95, loc=mu, scale=sd) + +# you are 160 cm tall, what percentile are you in? +norm.cdf(160, loc=mu, scale=sd) + +# you are 180 cm tall, what is the probability that someone is taller than you? +1 - norm.cdf(180, loc=mu, scale=sd) \ No newline at end of file diff --git a/ab_testing/comparing_epsilons.py b/ab_testing/comparing_epsilons.py new file mode 100755 index 00000000..8fe885c9 --- /dev/null +++ b/ab_testing/comparing_epsilons.py @@ -0,0 +1,89 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + + +class BanditArm: + def __init__(self, m): + self.m = m + self.m_estimate = 0 + self.N = 0 + + def pull(self): + return np.random.randn() + self.m + + def update(self, x): + self.N += 1 + self.m_estimate = (1 - 1.0/self.N)*self.m_estimate + 1.0/self.N*x + + +def run_experiment(m1, m2, m3, eps, N): + bandits = [BanditArm(m1), BanditArm(m2), BanditArm(m3)] + + # count number of suboptimal choices + means = np.array([m1, m2, m3]) + true_best = np.argmax(means) + count_suboptimal = 0 + + data = np.empty(N) + + for i in range(N): + # epsilon greedy + p = np.random.random() + if p < eps: + j = np.random.choice(len(bandits)) + else: + j = np.argmax([b.m_estimate for b in bandits]) + x = bandits[j].pull() + bandits[j].update(x) + + if j != true_best: + count_suboptimal += 1 + + # for the plot + data[i] = x + cumulative_average = np.cumsum(data) / (np.arange(N) + 1) + + # plot moving average ctr + plt.plot(cumulative_average) + plt.plot(np.ones(N)*m1) + plt.plot(np.ones(N)*m2) + plt.plot(np.ones(N)*m3) + plt.xscale('log') + plt.show() + + for b in bandits: + print(b.m_estimate) + + print("percent suboptimal for epsilon = %s:" % eps, float(count_suboptimal) / N) + + return cumulative_average + +if __name__ == '__main__': + m1, m2, m3 = 1.5, 2.5, 3.5 + c_1 = run_experiment(m1, m2, m3, 0.1, 100000) + c_05 = run_experiment(m1, m2, m3, 0.05, 100000) + c_01 = run_experiment(m1, m2, m3, 0.01, 100000) + + # log scale plot + plt.plot(c_1, label='eps = 0.1') + plt.plot(c_05, label='eps = 0.05') + plt.plot(c_01, label='eps = 0.01') + plt.legend() + plt.xscale('log') + plt.show() + + + # linear plot + plt.plot(c_1, label='eps = 0.1') + plt.plot(c_05, label='eps = 0.05') + plt.plot(c_01, label='eps = 0.01') + plt.legend() + plt.show() + diff --git a/ab_testing/epsilon_greedy.py b/ab_testing/epsilon_greedy.py new file mode 100755 index 00000000..b6eeb067 --- /dev/null +++ b/ab_testing/epsilon_greedy.py @@ -0,0 +1,93 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class BanditArm: + def 
__init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 0. + self.N = 0. # num samples collected so far + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N += 1. + self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N + + +def choose_random_argmax(a): + idx = np.argwhere(np.amax(a) == a).flatten() + return np.random.choice(idx) + + +def experiment(): + bandits = [BanditArm(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + num_times_explored = 0 + num_times_exploited = 0 + num_optimal = 0 + optimal_j = np.argmax([b.p for b in bandits]) + print("optimal j:", optimal_j) + + for i in range(NUM_TRIALS): + + # use epsilon-greedy to select the next bandit + if np.random.random() < EPS: + num_times_explored += 1 + j = np.random.randint(len(bandits)) + else: + num_times_exploited += 1 + j = choose_random_argmax([b.p_estimate for b in bandits]) + + if j == optimal_j: + num_optimal += 1 + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num_times_explored:", num_times_explored) + print("num_times_exploited:", num_times_exploited) + print("num times selected optimal bandit:", num_optimal) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() diff --git a/ab_testing/epsilon_greedy_starter.py b/ab_testing/epsilon_greedy_starter.py new file mode 100755 index 00000000..4b2a77d1 --- /dev/null +++ b/ab_testing/epsilon_greedy_starter.py @@ -0,0 +1,88 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class BanditArm: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = # TODO + self.N = # TODO + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N = # TODO + self.p_estimate = # TODO + + +def experiment(): + bandits = [BanditArm(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + num_times_explored = 0 + num_times_exploited = 0 + num_optimal = 0 + optimal_j = np.argmax([b.p for b in bandits]) + print("optimal j:", optimal_j) + + for i in range(NUM_TRIALS): + + # use epsilon-greedy to select the next bandit + if np.random.random() < EPS: + num_times_explored += 1 + j = # TODO + else: + num_times_exploited += 1 + j = # TODO + + if j == optimal_j: + num_optimal += 1 + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the 
distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num_times_explored:", num_times_explored) + print("num_times_exploited:", num_times_exploited) + print("num times selected optimal bandit:", num_optimal) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() diff --git a/ab_testing/extra_reading.txt b/ab_testing/extra_reading.txt new file mode 100644 index 00000000..71360a2e --- /dev/null +++ b/ab_testing/extra_reading.txt @@ -0,0 +1,20 @@ +The Unbiased Estimate of the Covariance Matrix +https://lazyprogrammer.me/covariance-matrix-divide-by-n-or-n-1/ + +Algorithms for the multi-armed bandit problem +https://www.cs.mcgill.ca/~vkules/bandits.pdf + +UCB REVISITED: IMPROVED REGRET BOUNDS FOR THE STOCHASTIC MULTI-ARMED BANDIT PROBLEM +http://personal.unileoben.ac.at/rortner/Pubs/UCBRev.pdf + +Finite-time Analysis of the Multiarmed Bandit Problem +https://link.springer.com/article/10.1023/A:1013689704352 + +A Tutorial on Thompson Sampling +https://web.stanford.edu/~bvr/pubs/TS_Tutorial.pdf + +An Empirical Evaluation of Thompson Sampling +https://papers.nips.cc/paper/4321-an-empirical-evaluation-of-thompson-sampling.pdf + +Analysis of Thompson Sampling for the Multi-armed Bandit Problem +http://proceedings.mlr.press/v23/agrawal12/agrawal12.pdf \ No newline at end of file diff --git a/ab_testing/optimistic.py b/ab_testing/optimistic.py new file mode 100644 index 00000000..1d024fef --- /dev/null +++ b/ab_testing/optimistic.py @@ -0,0 +1,71 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 5. + self.N = 1. # num samples collected so far + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N += 1. 
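# Illustrative aside (not from the course files): the running-mean update used by these
# bandit classes, p_estimate = ((N - 1) * p_estimate + x) / N, is algebraically identical
# to p_estimate += (x - p_estimate) / N, and it converges to the plain sample mean:
import numpy as np

rewards_demo = (np.random.random(1000) < 0.75).astype(float)  # simulated 0/1 pulls from a p=0.75 arm
est = 0.0
for n, x in enumerate(rewards_demo, start=1):
    est = est + (x - est) / n          # incremental form of the same update
assert np.isclose(est, rewards_demo.mean())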
+ self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + for i in range(NUM_TRIALS): + # use optimistic initial values to select the next bandit + j = np.argmax([b.p_estimate for b in bandits]) + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.ylim([0, 1]) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() diff --git a/ab_testing/optimistic_starter.py b/ab_testing/optimistic_starter.py new file mode 100644 index 00000000..56b4e5c9 --- /dev/null +++ b/ab_testing/optimistic_starter.py @@ -0,0 +1,71 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = # TODO + self.N = # TODO + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + # TODO + self.p_estimate = # TODO + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + for i in range(NUM_TRIALS): + # use optimistic initial values to select the next bandit + j = # TODO + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.ylim([0, 1]) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() diff --git a/ab_testing/ucb1.py b/ab_testing/ucb1.py new file mode 100644 index 00000000..5779b654 --- /dev/null +++ b/ab_testing/ucb1.py @@ -0,0 +1,81 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +# 
https://books.google.ca/books?id=_ATpBwAAQBAJ&lpg=PA201&ots=rinZM8jQ6s&dq=hoeffding%20bound%20gives%20probability%20%22greater%20than%201%22&pg=PA201#v=onepage&q&f=false +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + + +NUM_TRIALS = 100000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 0. + self.N = 0. # num samples collected so far + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N += 1. + self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N + + +def ucb(mean, n, nj): + return mean + np.sqrt(2*np.log(n) / nj) + + +def run_experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + rewards = np.empty(NUM_TRIALS) + total_plays = 0 + + # initialization: play each bandit once + for j in range(len(bandits)): + x = bandits[j].pull() + total_plays += 1 + bandits[j].update(x) + + for i in range(NUM_TRIALS): + j = np.argmax([ucb(b.p_estimate, total_plays, b.N) for b in bandits]) + x = bandits[j].pull() + total_plays += 1 + bandits[j].update(x) + + # for the plot + rewards[i] = x + cumulative_average = np.cumsum(rewards) / (np.arange(NUM_TRIALS) + 1) + + # plot moving average ctr + plt.plot(cumulative_average) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.xscale('log') + plt.show() + + # plot moving average ctr linear + plt.plot(cumulative_average) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + + for b in bandits: + print(b.p_estimate) + + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + return cumulative_average + +if __name__ == '__main__': + run_experiment() + diff --git a/ab_testing/ucb1_starter.py b/ab_testing/ucb1_starter.py new file mode 100644 index 00000000..9e9c3106 --- /dev/null +++ b/ab_testing/ucb1_starter.py @@ -0,0 +1,81 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +# https://books.google.ca/books?id=_ATpBwAAQBAJ&lpg=PA201&ots=rinZM8jQ6s&dq=hoeffding%20bound%20gives%20probability%20%22greater%20than%201%22&pg=PA201#v=onepage&q&f=false +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + + +NUM_TRIALS = 100000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 0. + self.N = 0. # num samples collected so far + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N += 1. 
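# Illustrative aside (the starter file deliberately leaves ucb() as a TODO; this simply
# mirrors the completed ucb1.py above): the UCB1 score is the sample mean plus an
# exploration bonus sqrt(2 * ln(total_plays) / plays_of_arm), so the bonus shrinks
# for arms that have already been played many times.
import numpy as np

def ucb1_score(sample_mean, total_plays, arm_plays):
    # exploration bonus derived from the Hoeffding bound
    return sample_mean + np.sqrt(2 * np.log(total_plays) / arm_plays)

print(ucb1_score(0.5, total_plays=100, arm_plays=5))   # rarely played arm -> large bonus
print(ucb1_score(0.5, total_plays=100, arm_plays=90))  # heavily played arm -> small bonus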
+ self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N + + +def ucb(mean, n, nj): + return # TODO + + +def run_experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + rewards = np.empty(NUM_TRIALS) + total_plays = 0 + + # initialization: play each bandit once + for j in range(len(bandits)): + x = bandits[j].pull() + total_plays += 1 + bandits[j].update(x) + + for i in range(NUM_TRIALS): + j = # TODO + x = bandits[j].pull() + total_plays += 1 + bandits[j].update(x) + + # for the plot + rewards[i] = x + cumulative_average = np.cumsum(rewards) / (np.arange(NUM_TRIALS) + 1) + + # plot moving average ctr + plt.plot(cumulative_average) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.xscale('log') + plt.show() + + # plot moving average ctr linear + plt.plot(cumulative_average) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + + for b in bandits: + print(b.p_estimate) + + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + return cumulative_average + +if __name__ == '__main__': + run_experiment() + diff --git a/ann_class/backprop.py b/ann_class/backprop.py index 9ce9f85d..62ce9e73 100644 --- a/ann_class/backprop.py +++ b/ann_class/backprop.py @@ -144,10 +144,16 @@ def main(): # this is gradient ASCENT, not DESCENT # be comfortable with both! # oldW2 = W2.copy() - W2 += learning_rate * derivative_w2(hidden, T, output) - b2 += learning_rate * derivative_b2(T, output) - W1 += learning_rate * derivative_w1(X, hidden, T, output, W2) - b1 += learning_rate * derivative_b1(T, output, W2, hidden) + + gW2 = derivative_w2(hidden, T, output) + gb2 = derivative_b2(T, output) + gW1 = derivative_w1(X, hidden, T, output, W2) + gb1 = derivative_b1(T, output, W2, hidden) + + W2 += learning_rate * gW2 + b2 += learning_rate * gb2 + W1 += learning_rate * gW1 + b1 += learning_rate * gb1 plt.plot(costs) plt.show() diff --git a/ann_class/extra_reading.txt b/ann_class/extra_reading.txt new file mode 100644 index 00000000..3d4fde45 --- /dev/null +++ b/ann_class/extra_reading.txt @@ -0,0 +1,11 @@ +The Chain Rule of Calculus +http://tutorial.math.lamar.edu/Classes/CalcI/ChainRule.aspx + +Yes you should understand backprop by Andrej Karpathy +https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b + +The Matrix Cookbook +https://www.math.uwaterloo.ca/~hwolkowi/matrixcookbook.pdf + +Rumelhart, D. E., Hinton, G. E., and Williams, R. J. (1986) Learning representations by back-propagating errors. 
+https://www.iro.umontreal.ca/~vincentp/ift3395/lectures/backprop_old.pdf \ No newline at end of file diff --git a/ann_class/xor_donut.py b/ann_class/xor_donut.py index 236883d3..48331337 100644 --- a/ann_class/xor_donut.py +++ b/ann_class/xor_donut.py @@ -82,10 +82,17 @@ def test_xor(): er = np.mean(prediction != Y) LL.append(ll) - W2 += learning_rate * (derivative_w2(Z, Y, pY) - regularization * W2) - b2 += learning_rate * (derivative_b2(Y, pY) - regularization * b2) - W1 += learning_rate * (derivative_w1(X, Z, Y, pY, W2) - regularization * W1) - b1 += learning_rate * (derivative_b1(Z, Y, pY, W2) - regularization * b1) + + # get gradients + gW2 = derivative_w2(Z, Y, pY) + gb2 = derivative_b2(Y, pY) + gW1 = derivative_w1(X, Z, Y, pY, W2) + gb1 = derivative_b1(Z, Y, pY, W2) + + W2 += learning_rate * (gW2 - regularization * W2) + b2 += learning_rate * (gb2 - regularization * b2) + W1 += learning_rate * (gW1 - regularization * W1) + b1 += learning_rate * (gb1 - regularization * b1) if i % 1000 == 0: print(ll) @@ -128,10 +135,17 @@ def test_donut(): prediction = predict(X, W1, b1, W2, b2) er = np.abs(prediction - Y).mean() LL.append(ll) - W2 += learning_rate * (derivative_w2(Z, Y, pY) - regularization * W2) - b2 += learning_rate * (derivative_b2(Y, pY) - regularization * b2) - W1 += learning_rate * (derivative_w1(X, Z, Y, pY, W2) - regularization * W1) - b1 += learning_rate * (derivative_b1(Z, Y, pY, W2) - regularization * b1) + + # get gradients + gW2 = derivative_w2(Z, Y, pY) + gb2 = derivative_b2(Y, pY) + gW1 = derivative_w1(X, Z, Y, pY, W2) + gb1 = derivative_b1(Z, Y, pY, W2) + + W2 += learning_rate * (gW2 - regularization * W2) + b2 += learning_rate * (gb2 - regularization * b2) + W1 += learning_rate * (gW1 - regularization * W1) + b1 += learning_rate * (gb1 - regularization * b1) if i % 300 == 0: print("i:", i, "ll:", ll, "classification rate:", 1 - er) plt.plot(LL) @@ -139,8 +153,8 @@ def test_donut(): if __name__ == '__main__': - # test_xor() - test_donut() + test_xor() + # test_donut() diff --git a/ann_class2/adam.py b/ann_class2/adam.py index 1ae7813a..3c0243de 100644 --- a/ann_class2/adam.py +++ b/ann_class2/adam.py @@ -105,10 +105,10 @@ def main(): t += 1 # apply updates to the params - W1 = W1 - lr0 * hat_mW1 / np.sqrt(hat_vW1 + eps) - b1 = b1 - lr0 * hat_mb1 / np.sqrt(hat_vb1 + eps) - W2 = W2 - lr0 * hat_mW2 / np.sqrt(hat_vW2 + eps) - b2 = b2 - lr0 * hat_mb2 / np.sqrt(hat_vb2 + eps) + W1 = W1 - lr0 * hat_mW1 / (np.sqrt(hat_vW1) + eps) + b1 = b1 - lr0 * hat_mb1 / (np.sqrt(hat_vb1) + eps) + W2 = W2 - lr0 * hat_mW2 / (np.sqrt(hat_vW2) + eps) + b2 = b2 - lr0 * hat_mb2 / (np.sqrt(hat_vb2) + eps) if j % print_period == 0: @@ -157,25 +157,28 @@ def main(): Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),] pYbatch, Z = forward(Xbatch, W1, b1, W2, b2) - # updates + # derivatives gW2 = derivative_w2(Z, Ybatch, pYbatch) + reg*W2 - cache_W2 = decay_rate*cache_W2 + (1 - decay_rate)*gW2*gW2 - dW2 = mu * dW2 + (1 - mu) * lr0 * gW2 / (np.sqrt(cache_W2) + eps) - W2 -= dW2 - gb2 = derivative_b2(Ybatch, pYbatch) + reg*b2 - cache_b2 = decay_rate*cache_b2 + (1 - decay_rate)*gb2*gb2 - db2 = mu * db2 + (1 - mu) * lr0 * gb2 / (np.sqrt(cache_b2) + eps) - b2 -= db2 - gW1 = derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1 - cache_W1 = decay_rate*cache_W1 + (1 - decay_rate)*gW1*gW1 - dW1 = mu * dW1 + (1 - mu) * lr0 * gW1 / (np.sqrt(cache_W1) + eps) - W1 -= dW1 - gb1 = derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1 + + # caches + cache_W2 = decay_rate*cache_W2 + (1 - decay_rate)*gW2*gW2 + 
cache_b2 = decay_rate*cache_b2 + (1 - decay_rate)*gb2*gb2 + cache_W1 = decay_rate*cache_W1 + (1 - decay_rate)*gW1*gW1 cache_b1 = decay_rate*cache_b1 + (1 - decay_rate)*gb1*gb1 + + # momentum + dW2 = mu * dW2 + (1 - mu) * lr0 * gW2 / (np.sqrt(cache_W2) + eps) + db2 = mu * db2 + (1 - mu) * lr0 * gb2 / (np.sqrt(cache_b2) + eps) + dW1 = mu * dW1 + (1 - mu) * lr0 * gW1 / (np.sqrt(cache_W1) + eps) db1 = mu * db1 + (1 - mu) * lr0 * gb1 / (np.sqrt(cache_b1) + eps) + + # updates + W2 -= dW2 + b2 -= db2 + W1 -= dW1 b1 -= db1 if j % print_period == 0: diff --git a/ann_class2/batch_norm_theano.py b/ann_class2/batch_norm_theano.py index cfb9d999..c86c11a9 100644 --- a/ann_class2/batch_norm_theano.py +++ b/ann_class2/batch_norm_theano.py @@ -95,6 +95,19 @@ def forward(self, X): return self.f(X.dot(self.W) + self.b) +def momentum_updates(cost, params, lr, mu): + grads = T.grad(cost, params) + updates = [] + + for p, g in zip(params, grads): + dp = theano.shared(p.get_value() * 0) + new_dp = mu*dp - lr*g + new_p = p + new_dp + updates.append((dp, new_dp)) + updates.append((p, new_p)) + return updates + + class ANN(object): def __init__(self, hidden_layer_sizes): self.hidden_layer_sizes = hidden_layer_sizes @@ -125,9 +138,6 @@ def fit(self, X, Y, Xtest, Ytest, activation=T.nnet.relu, learning_rate=1e-2, mu for h in self.layers: self.params += h.params - # for momentum - dparams = [theano.shared(np.zeros_like(p.get_value())) for p in self.params] - # note! we will need to build the output differently # for train and test (prediction) @@ -143,11 +153,7 @@ def fit(self, X, Y, Xtest, Ytest, activation=T.nnet.relu, learning_rate=1e-2, mu grads = T.grad(cost, self.params) # momentum only - updates = [ - (p, p + mu*dp - learning_rate*g) for p, dp, g in zip(self.params, dparams, grads) - ] + [ - (dp, mu*dp - learning_rate*g) for dp, g in zip(dparams, grads) - ] + updates = momentum_updates(cost, self.params, learning_rate, mu) for layer in self.layers[:-1]: updates += layer.running_update diff --git a/ann_class2/dropout_theano.py b/ann_class2/dropout_theano.py index 81e5bebe..61486626 100644 --- a/ann_class2/dropout_theano.py +++ b/ann_class2/dropout_theano.py @@ -19,6 +19,19 @@ from sklearn.utils import shuffle +def momentum_updates(cost, params, lr, mu): + grads = T.grad(cost, params) + updates = [] + + for p, g in zip(params, grads): + dp = theano.shared(p.get_value() * 0) + new_dp = mu*dp - lr*g + new_p = p + new_dp + updates.append((dp, new_dp)) + updates.append((p, new_p)) + return updates + + class HiddenLayer(object): def __init__(self, M1, M2, an_id): self.id = an_id @@ -39,7 +52,7 @@ def __init__(self, hidden_layer_sizes, p_keep): self.hidden_layer_sizes = hidden_layer_sizes self.dropout_rates = p_keep - def fit(self, X, Y, Xvalid, Yvalid, learning_rate=1e-4, mu=0.9, decay=0.9, epochs=8, batch_sz=100, show_fig=False): + def fit(self, X, Y, Xvalid, Yvalid, learning_rate=1e-2, mu=0.9, decay=0.9, epochs=10, batch_sz=100, show_fig=False): X = X.astype(np.float32) Y = Y.astype(np.int32) Xvalid = Xvalid.astype(np.float32) @@ -75,32 +88,7 @@ def fit(self, X, Y, Xvalid, Yvalid, learning_rate=1e-4, mu=0.9, decay=0.9, epoch # this cost is for training cost = -T.mean(T.log(pY_train[T.arange(thY.shape[0]), thY])) - - # gradients wrt each param - grads = T.grad(cost, self.params) - - # for momentum - dparams = [theano.shared(np.zeros_like(p.get_value())) for p in self.params] - - # for rmsprop - cache = [theano.shared(np.ones_like(p.get_value())) for p in self.params] - - new_cache = [decay*c + (1-decay)*g*g for p, 
c, g in zip(self.params, cache, grads)] - new_dparams = [mu*dp - learning_rate*g/T.sqrt(new_c + 1e-10) for p, new_c, dp, g in zip(self.params, new_cache, dparams, grads)] - updates = [ - (c, new_c) for c, new_c in zip(cache, new_cache) - ] + [ - (dp, new_dp) for dp, new_dp in zip(dparams, new_dparams) - ] + [ - (p, p + new_dp) for p, new_dp in zip(self.params, new_dparams) - ] - - # momentum only - # updates = [ - # (p, p + mu*dp - learning_rate*T.grad(cost, p)) for p, dp in zip(self.params, dparams) - # ] + [ - # (dp, mu*dp - learning_rate*T.grad(cost, p)) for p, dp in zip(self.params, dparams) - # ] + updates = momentum_updates(cost, self.params, learning_rate, mu) train_op = theano.function( inputs=[thX, thY], diff --git a/ann_class2/extra_reading.txt b/ann_class2/extra_reading.txt index f1825dd0..64c17548 100644 --- a/ann_class2/extra_reading.txt +++ b/ann_class2/extra_reading.txt @@ -7,6 +7,9 @@ https://arxiv.org/abs/1609.08326 Asynchronous Stochastic Gradient Descent with Variance Reduction for Non-Convex Optimization https://arxiv.org/abs/1604.03584 +Adam: A Method for Stochastic Optimization +https://arxiv.org/abs/1412.6980 + Large Scale Distributed Deep Networks https://static.googleusercontent.com/media/research.google.com/en//archive/large_deep_networks_nips2012.pdf @@ -25,4 +28,7 @@ Advances in optimizing Recurrent Networks by Yoshua Bengio, Section 3.5 http://arxiv.org/pdf/1212.0901v2.pdf Dropout: A Simple Way to Prevent Neural Networks from Overfitting -https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf \ No newline at end of file +https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf + +The Loss Surfaces of Multilayer Networks +https://arxiv.org/pdf/1412.0233.pdf \ No newline at end of file diff --git a/ann_class2/keras_example.py b/ann_class2/keras_example.py index 0fb28736..aa9a5e19 100644 --- a/ann_class2/keras_example.py +++ b/ann_class2/keras_example.py @@ -75,8 +75,8 @@ plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/ann_class2/keras_functional.py b/ann_class2/keras_functional.py index b8c1a793..265d3f9b 100644 --- a/ann_class2/keras_functional.py +++ b/ann_class2/keras_functional.py @@ -70,8 +70,8 @@ plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/ann_class2/momentum.py b/ann_class2/momentum.py index 5df6fa30..8fb86962 100644 --- a/ann_class2/momentum.py +++ b/ann_class2/momentum.py @@ -56,17 +56,24 @@ def main(): losses_batch = [] errors_batch = [] for i in range(max_iter): + Xtrain, Ytrain, Ytrain_ind = shuffle(Xtrain, Ytrain, Ytrain_ind) for j in range(n_batches): Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),] Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),] pYbatch, Z = forward(Xbatch, W1, b1, W2, b2) # print "first batch cost:", cost(pYbatch, Ybatch) + # gradients + gW2 = derivative_w2(Z, Ybatch, pYbatch) + reg*W2 + gb2 = derivative_b2(Ybatch, pYbatch) + reg*b2 + gW1 = derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1 + gb1 = derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1 + # updates - W2 -= lr*(derivative_w2(Z, Ybatch, pYbatch) + reg*W2) - b2 -= lr*(derivative_b2(Ybatch, pYbatch) + reg*b2) - W1 -= lr*(derivative_w1(Xbatch, Z, Ybatch, 
pYbatch, W2) + reg*W1) - b1 -= lr*(derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1) + W2 -= lr*gW2 + b2 -= lr*gb2 + W1 -= lr*gW1 + b1 -= lr*gb1 if j % print_period == 0: pY, _ = forward(Xtest, W1, b1, W2, b2) @@ -94,6 +101,7 @@ def main(): dW1 = 0 db1 = 0 for i in range(max_iter): + Xtrain, Ytrain, Ytrain_ind = shuffle(Xtrain, Ytrain, Ytrain_ind) for j in range(n_batches): Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),] Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),] @@ -145,6 +153,7 @@ def main(): vW1 = 0 vb1 = 0 for i in range(max_iter): + Xtrain, Ytrain, Ytrain_ind = shuffle(Xtrain, Ytrain, Ytrain_ind) for j in range(n_batches): Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),] Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),] diff --git a/ann_class2/rmsprop.py b/ann_class2/rmsprop.py index 43afce23..f0bb093e 100644 --- a/ann_class2/rmsprop.py +++ b/ann_class2/rmsprop.py @@ -48,11 +48,17 @@ def main(): pYbatch, Z = forward(Xbatch, W1, b1, W2, b2) # print "first batch cost:", cost(pYbatch, Ybatch) + # gradients + gW2 = derivative_w2(Z, Ybatch, pYbatch) + reg*W2 + gb2 = derivative_b2(Ybatch, pYbatch) + reg*b2 + gW1 = derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1 + gb1 = derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1 + # updates - W2 -= lr*(derivative_w2(Z, Ybatch, pYbatch) + reg*W2) - b2 -= lr*(derivative_b2(Ybatch, pYbatch) + reg*b2) - W1 -= lr*(derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1) - b1 -= lr*(derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1) + W2 -= lr*gW2 + b2 -= lr*gb2 + W1 -= lr*gW1 + b1 -= lr*gb1 if j % print_period == 0: # calculate just for LL @@ -91,21 +97,22 @@ def main(): pYbatch, Z = forward(Xbatch, W1, b1, W2, b2) # print "first batch cost:", cost(pYbatch, Ybatch) - # updates + # gradients gW2 = derivative_w2(Z, Ybatch, pYbatch) + reg*W2 - cache_W2 = decay_rate*cache_W2 + (1 - decay_rate)*gW2*gW2 - W2 -= lr0 * gW2 / (np.sqrt(cache_W2) + eps) - gb2 = derivative_b2(Ybatch, pYbatch) + reg*b2 - cache_b2 = decay_rate*cache_b2 + (1 - decay_rate)*gb2*gb2 - b2 -= lr0 * gb2 / (np.sqrt(cache_b2) + eps) - gW1 = derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1 - cache_W1 = decay_rate*cache_W1 + (1 - decay_rate)*gW1*gW1 - W1 -= lr0 * gW1 / (np.sqrt(cache_W1) + eps) - gb1 = derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1 + + # caches + cache_W2 = decay_rate*cache_W2 + (1 - decay_rate)*gW2*gW2 + cache_b2 = decay_rate*cache_b2 + (1 - decay_rate)*gb2*gb2 + cache_W1 = decay_rate*cache_W1 + (1 - decay_rate)*gW1*gW1 cache_b1 = decay_rate*cache_b1 + (1 - decay_rate)*gb1*gb1 + + # updates + W2 -= lr0 * gW2 / (np.sqrt(cache_W2) + eps) + b2 -= lr0 * gb2 / (np.sqrt(cache_b2) + eps) + W1 -= lr0 * gW1 / (np.sqrt(cache_W1) + eps) b1 -= lr0 * gb1 / (np.sqrt(cache_b1) + eps) if j % print_period == 0: diff --git a/ann_class2/sgd.py b/ann_class2/sgd.py index 3c338b6d..91ab78b1 100644 --- a/ann_class2/sgd.py +++ b/ann_class2/sgd.py @@ -1,15 +1,9 @@ # In this file we compare the progression of the cost function vs. iteration # for 3 cases: # 1) full gradient descent -# 2) batch gradient descent +# 2) mini-batch gradient descent # 3) stochastic gradient descent # -# We use the PCA-transformed data to keep the dimensionality down (D=300) -# I've tailored this example so that the training time for each is feasible. -# So what we are really comparing is how quickly each type of GD can converge, -# (but not actually waiting for convergence) and what the cost looks like at -# each iteration. 
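# Illustrative aside (not part of sgd.py): the three variants compared in this script
# differ only in how many samples feed each parameter update, and therefore in how many
# updates happen per epoch. With the batch size of 500 used below:
import numpy as np

def updates_per_epoch(n_samples, batch_sz):
    return int(np.ceil(n_samples / batch_sz))

N_demo = 41000                             # roughly the training-set size mentioned in the comments
print(updates_per_epoch(N_demo, N_demo))   # full gradient descent  -> 1 update per epoch
print(updates_per_epoch(N_demo, 1))        # stochastic (1 sample)  -> 41000 updates per epoch
print(updates_per_epoch(N_demo, 500))      # mini-batch of 500      -> 82 updates per epoch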
-# # For the class Data Science: Practical Deep Learning Concepts in Theano and TensorFlow # https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow # https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow @@ -24,11 +18,11 @@ from sklearn.utils import shuffle from datetime import datetime -from util import get_transformed_data, forward, error_rate, cost, gradW, gradb, y2indicator +from util import get_normalized_data, forward, error_rate, cost, gradW, gradb, y2indicator def main(): - Xtrain, Xtest, Ytrain, Ytest = get_transformed_data() + Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() print("Performing logistic regression...") N, D = Xtrain.shape @@ -37,103 +31,149 @@ def main(): # 1. full W = np.random.randn(D, 10) / np.sqrt(D) + W0 = W.copy() # save for later b = np.zeros(10) - LL = [] - lr = 0.0001 - reg = 0.01 + test_losses_full = [] + lr = 0.9 + reg = 0. t0 = datetime.now() + last_dt = 0 + intervals = [] for i in range(50): p_y = forward(Xtrain, W, b) - W += lr*(gradW(Ytrain_ind, p_y, Xtrain) - reg*W) - b += lr*(gradb(Ytrain_ind, p_y) - reg*b) - + gW = gradW(Ytrain_ind, p_y, Xtrain) / N + gb = gradb(Ytrain_ind, p_y) / N + + W += lr*(gW - reg*W) + b += lr*(gb - reg*b) p_y_test = forward(Xtest, W, b) - ll = cost(p_y_test, Ytest_ind) - LL.append(ll) - if i % 1 == 0: - err = error_rate(p_y_test, Ytest) - if i % 10 == 0: - print("Cost at iteration %d: %.6f" % (i, ll)) - print("Error rate:", err) + test_loss = cost(p_y_test, Ytest_ind) + dt = (datetime.now() - t0).total_seconds() + + # save these + dt2 = dt - last_dt + last_dt = dt + intervals.append(dt2) + + test_losses_full.append([dt, test_loss]) + if (i + 1) % 10 == 0: + print("Cost at iteration %d: %.6f" % (i + 1, test_loss)) p_y = forward(Xtest, W, b) print("Final error rate:", error_rate(p_y, Ytest)) print("Elapsted time for full GD:", datetime.now() - t0) + # save the max time so we don't surpass it in subsequent iterations + max_dt = dt + avg_interval_dt = np.mean(intervals) + # 2. stochastic - W = np.random.randn(D, 10) / np.sqrt(D) + W = W0.copy() b = np.zeros(10) - LL_stochastic = [] - lr = 0.0001 - reg = 0.01 + test_losses_sgd = [] + lr = 0.001 + reg = 0. t0 = datetime.now() + last_dt_calculated_loss = 0 + done = False for i in range(50): # takes very long since we're computing cost for 41k samples tmpX, tmpY = shuffle(Xtrain, Ytrain_ind) - for n in range(min(N, 500)): # shortcut so it won't take so long... + for n in range(N): x = tmpX[n,:].reshape(1,D) y = tmpY[n,:].reshape(1,10) p_y = forward(x, W, b) - W += lr*(gradW(y, p_y, x) - reg*W) - b += lr*(gradb(y, p_y) - reg*b) + gW = gradW(y, p_y, x) + gb = gradb(y, p_y) + + W += lr*(gW - reg*W) + b += lr*(gb - reg*b) + + dt = (datetime.now() - t0).total_seconds() + dt2 = dt - last_dt_calculated_loss - p_y_test = forward(Xtest, W, b) - ll = cost(p_y_test, Ytest_ind) - LL_stochastic.append(ll) + if dt2 > avg_interval_dt: + last_dt_calculated_loss = dt + p_y_test = forward(Xtest, W, b) + test_loss = cost(p_y_test, Ytest_ind) + test_losses_sgd.append([dt, test_loss]) - if i % 1 == 0: - err = error_rate(p_y_test, Ytest) - if i % 10 == 0: - print("Cost at iteration %d: %.6f" % (i, ll)) - print("Error rate:", err) + # time to quit + if dt > max_dt: + done = True + break + if done: + break + + if (i + 1) % 1 == 0: + print("Cost at iteration %d: %.6f" % (i + 1, test_loss)) p_y = forward(Xtest, W, b) print("Final error rate:", error_rate(p_y, Ytest)) print("Elapsted time for SGD:", datetime.now() - t0) - # 3. 
batch - W = np.random.randn(D, 10) / np.sqrt(D) + # 3. mini-batch + W = W0.copy() b = np.zeros(10) - LL_batch = [] - lr = 0.0001 - reg = 0.01 + test_losses_batch = [] batch_sz = 500 - n_batches = N // batch_sz + lr = 0.08 + reg = 0. + n_batches = int(np.ceil(N / batch_sz)) + + t0 = datetime.now() + last_dt_calculated_loss = 0 + done = False for i in range(50): tmpX, tmpY = shuffle(Xtrain, Ytrain_ind) for j in range(n_batches): - x = tmpX[j*batch_sz:(j*batch_sz + batch_sz),:] - y = tmpY[j*batch_sz:(j*batch_sz + batch_sz),:] + x = tmpX[j*batch_sz:(j + 1)*batch_sz,:] + y = tmpY[j*batch_sz:(j + 1)*batch_sz,:] p_y = forward(x, W, b) - W += lr*(gradW(y, p_y, x) - reg*W) - b += lr*(gradb(y, p_y) - reg*b) - - p_y_test = forward(Xtest, W, b) - ll = cost(p_y_test, Ytest_ind) - LL_batch.append(ll) - if i % 1 == 0: - err = error_rate(p_y_test, Ytest) - if i % 10 == 0: - print("Cost at iteration %d: %.6f" % (i, ll)) - print("Error rate:", err) + current_batch_sz = len(x) + gW = gradW(y, p_y, x) / current_batch_sz + gb = gradb(y, p_y) / current_batch_sz + + W += lr*(gW - reg*W) + b += lr*(gb - reg*b) + + dt = (datetime.now() - t0).total_seconds() + dt2 = dt - last_dt_calculated_loss + + if dt2 > avg_interval_dt: + last_dt_calculated_loss = dt + p_y_test = forward(Xtest, W, b) + test_loss = cost(p_y_test, Ytest_ind) + test_losses_batch.append([dt, test_loss]) + + # time to quit + if dt > max_dt: + done = True + break + if done: + break + + if (i + 1) % 10 == 0: + print("Cost at iteration %d: %.6f" % (i + 1, test_loss)) p_y = forward(Xtest, W, b) print("Final error rate:", error_rate(p_y, Ytest)) - print("Elapsted time for batch GD:", datetime.now() - t0) + print("Elapsted time for mini-batch GD:", datetime.now() - t0) + # convert to numpy arrays + test_losses_full = np.array(test_losses_full) + test_losses_sgd = np.array(test_losses_sgd) + test_losses_batch = np.array(test_losses_batch) - x1 = np.linspace(0, 1, len(LL)) - plt.plot(x1, LL, label="full") - x2 = np.linspace(0, 1, len(LL_stochastic)) - plt.plot(x2, LL_stochastic, label="stochastic") - x3 = np.linspace(0, 1, len(LL_batch)) - plt.plot(x3, LL_batch, label="batch") + + plt.plot(test_losses_full[:,0], test_losses_full[:,1], label="full") + plt.plot(test_losses_sgd[:,0], test_losses_sgd[:,1], label="sgd") + plt.plot(test_losses_batch[:,0], test_losses_batch[:,1], label="mini-batch") plt.legend() plt.show() diff --git a/ann_logistic_extra/ann_train.py b/ann_logistic_extra/ann_train.py index 5c84a7e4..15710e76 100644 --- a/ann_logistic_extra/ann_train.py +++ b/ann_logistic_extra/ann_train.py @@ -48,8 +48,8 @@ def predict(P_Y_given_X): def classification_rate(Y, P): return np.mean(Y == P) -def cross_entropy(T, pY): - return -np.mean(T*np.log(pY)) +def cross_entropy(Y, pY): + return -np.sum(Y * np.log(pY)) / len(Y) # train loop @@ -66,18 +66,22 @@ def cross_entropy(T, pY): test_costs.append(ctest) # gradient descent - W2 -= learning_rate*Ztrain.T.dot(pYtrain - Ytrain_ind) - b2 -= learning_rate*(pYtrain - Ytrain_ind).sum(axis=0) - dZ = (pYtrain - Ytrain_ind).dot(W2.T) * (1 - Ztrain*Ztrain) - W1 -= learning_rate*Xtrain.T.dot(dZ) - b1 -= learning_rate*dZ.sum(axis=0) + gW2 = Ztrain.T.dot(pYtrain - Ytrain_ind) + gb2 = (pYtrain - Ytrain_ind).sum(axis=0) + dZ = (pYtrain - Ytrain_ind).dot(W2.T) * (1 - Ztrain * Ztrain) + gW1 = Xtrain.T.dot(dZ) + gb1 = dZ.sum(axis=0) + W2 -= learning_rate * gW2 + b2 -= learning_rate * gb2 + W1 -= learning_rate * gW1 + b1 -= learning_rate * gb1 if i % 1000 == 0: print(i, ctrain, ctest) print("Final train classification_rate:", 
classification_rate(Ytrain, predict(pYtrain))) print("Final test classification_rate:", classification_rate(Ytest, predict(pYtest))) -legend1, = plt.plot(train_costs, label='train cost') -legend2, = plt.plot(test_costs, label='test cost') -plt.legend([legend1, legend2]) +plt.plot(train_costs, label='train cost') +plt.plot(test_costs, label='test cost') +plt.legend() plt.show() \ No newline at end of file diff --git a/ann_logistic_extra/logistic_softmax_train.py b/ann_logistic_extra/logistic_softmax_train.py index 2bdc114f..94874f14 100644 --- a/ann_logistic_extra/logistic_softmax_train.py +++ b/ann_logistic_extra/logistic_softmax_train.py @@ -44,8 +44,8 @@ def predict(P_Y_given_X): def classification_rate(Y, P): return np.mean(Y == P) -def cross_entropy(T, pY): - return -np.mean(T*np.log(pY)) +def cross_entropy(Y, pY): + return -np.sum(Y * np.log(pY)) / len(Y) # train loop @@ -70,7 +70,7 @@ def cross_entropy(T, pY): print("Final train classification_rate:", classification_rate(Ytrain, predict(pYtrain))) print("Final test classification_rate:", classification_rate(Ytest, predict(pYtest))) -legend1, = plt.plot(train_costs, label='train cost') -legend2, = plt.plot(test_costs, label='test cost') -plt.legend([legend1, legend2]) +plt.plot(train_costs, label='train cost') +plt.plot(test_costs, label='test cost') +plt.legend() plt.show() \ No newline at end of file diff --git a/ann_logistic_extra/logistic_train.py b/ann_logistic_extra/logistic_train.py index c9a22815..abedd5ba 100644 --- a/ann_logistic_extra/logistic_train.py +++ b/ann_logistic_extra/logistic_train.py @@ -55,9 +55,9 @@ def cross_entropy(T, pY): print("Final train classification_rate:", classification_rate(Ytrain, np.round(pYtrain))) print("Final test classification_rate:", classification_rate(Ytest, np.round(pYtest))) -legend1, = plt.plot(train_costs, label='train cost') -legend2, = plt.plot(test_costs, label='test cost') -plt.legend([legend1, legend2]) +plt.plot(train_costs, label='train cost') +plt.plot(test_costs, label='test cost') +plt.legend() plt.show() diff --git a/ann_logistic_extra/process.py b/ann_logistic_extra/process.py index 785755b7..0048f9e0 100644 --- a/ann_logistic_extra/process.py +++ b/ann_logistic_extra/process.py @@ -21,7 +21,7 @@ def get_data(): # df.head() # easier to work with numpy array - data = df.values + data = df.to_numpy() # shuffle it np.random.shuffle(data) diff --git a/calculus/WHERE ARE THE NOTEBOOKS.txt b/calculus/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..5446ce25 --- /dev/null +++ b/calculus/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,5 @@ +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. + +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/calculus/extra_reading.txt b/calculus/extra_reading.txt new file mode 100644 index 00000000..404cc6d0 --- /dev/null +++ b/calculus/extra_reading.txt @@ -0,0 +1,2 @@ +Calculus: Early Transcendentals +https://amzn.to/3Kwmabe \ No newline at end of file diff --git a/chatgpt_trading/WHERE ARE THE NOTEBOOKS.txt b/chatgpt_trading/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..5446ce25 --- /dev/null +++ b/chatgpt_trading/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,5 @@ +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. 
+ +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/chatgpt_trading/extra_reading.txt b/chatgpt_trading/extra_reading.txt new file mode 100644 index 00000000..59a5da5d --- /dev/null +++ b/chatgpt_trading/extra_reading.txt @@ -0,0 +1,5 @@ +ARIMA (for mean reversion) +https://deeplearningcourses.com/c/time-series-analysis + +Financial Engineering +https://deeplearningcourses.com/c/ai-finance \ No newline at end of file diff --git a/cnn_class/WHERE ARE THE NOTEBOOKS.txt b/cnn_class/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..5446ce25 --- /dev/null +++ b/cnn_class/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,5 @@ +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. + +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/cnn_class/cnn_tf.py b/cnn_class/cnn_tf.py index ec768d75..284910e8 100644 --- a/cnn_class/cnn_tf.py +++ b/cnn_class/cnn_tf.py @@ -32,7 +32,6 @@ def convpool(X, W, b): def init_filter(shape, poolsz): - # w = np.random.randn(*shape) * np.sqrt(2) / np.sqrt(np.prod(shape[:-1]) + shape[-1]*np.prod(shape[:-2]) / np.prod(poolsz)) w = np.random.randn(*shape) * np.sqrt(2.0 / np.prod(shape[:-1])) return w.astype(np.float32) diff --git a/cnn_class/cnn_tf_plot_filters.py b/cnn_class/cnn_tf_plot_filters.py index 8be13efe..8ab88671 100644 --- a/cnn_class/cnn_tf_plot_filters.py +++ b/cnn_class/cnn_tf_plot_filters.py @@ -32,7 +32,6 @@ def convpool(X, W, b): def init_filter(shape, poolsz): - # w = np.random.randn(*shape) * np.sqrt(2) / np.sqrt(np.prod(shape[:-1]) + shape[-1]*np.prod(shape[:-2]) / np.prod(poolsz)) w = np.random.randn(*shape) * np.sqrt(2.0 / np.prod(shape[:-1])) return w.astype(np.float32) diff --git a/cnn_class/cnn_theano.py b/cnn_class/cnn_theano.py index 311577d6..41a297b9 100644 --- a/cnn_class/cnn_theano.py +++ b/cnn_class/cnn_theano.py @@ -44,7 +44,6 @@ def convpool(X, W, b, poolsize=(2, 2)): def init_filter(shape, poolsz): - # w = np.random.randn(*shape) / np.sqrt(np.prod(shape[1:]) + shape[0]*np.prod(shape[2:]) / np.prod(poolsz)) w = np.random.randn(*shape) * np.sqrt(2.0 / np.prod(shape[1:])) return w.astype(np.float32) @@ -82,8 +81,8 @@ def main(): max_iter = 6 print_period = 10 - lr = np.float32(1e-2) - mu = np.float32(0.99) + lr = np.float32(1e-3) + mu = np.float32(0.9) N = Xtrain.shape[0] batch_sz = 500 diff --git a/cnn_class/exercises.txt b/cnn_class/exercises.txt new file mode 100644 index 00000000..81a2e5a4 --- /dev/null +++ b/cnn_class/exercises.txt @@ -0,0 +1,20 @@ +Logistic Regression +https://www.kaggle.com/uciml/pima-indians-diabetes-database +https://lazyprogrammer.me/course_files/exercises/diabetes.csv + +Linear Regression +https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html +https://lazyprogrammer.me/course_files/exercises/boston.txt + +ANN +https://archive.ics.uci.edu/ml/datasets/ecoli (orig) +https://www.kaggle.com/elikplim/ecoli-data-set (alt) +https://lazyprogrammer.me/course_files/exercises/ecoli.csv + +CNN +https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge +https://archive.org/download/fer2013_202311/fer2013.csv + +NLP 
+https://www.kaggle.com/crowdflower/twitter-airline-sentiment +https://lazyprogrammer.me/course_files/exercises/AirlineSentimentTweets.csv \ No newline at end of file diff --git a/cnn_class/extra_reading.txt b/cnn_class/extra_reading.txt new file mode 100644 index 00000000..ed16dc09 --- /dev/null +++ b/cnn_class/extra_reading.txt @@ -0,0 +1,17 @@ +Gradient-Based Learning Applied to Document Recognition +http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf + +ImageNet Classification with Deep Convolutional Neural Networks +https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf + +Convolution arithmetic tutorial +https://theano-pymc.readthedocs.io/en/latest/tutorial/conv_arithmetic.html + +Very Deep Convolutional Networks for Large-Scale Visual Recognition +http://www.robots.ox.ac.uk/~vgg/research/very_deep/ + +ImageNet Classification with Deep Convolutional Neural Networks +http://image-net.org/challenges/LSVRC/2012/supervision.pdf + +Going deeper with convolutions +https://arxiv.org/pdf/1409.4842.pdf \ No newline at end of file diff --git a/cnn_class/keras_example.py b/cnn_class/keras_example.py index 4667d84f..d0463588 100644 --- a/cnn_class/keras_example.py +++ b/cnn_class/keras_example.py @@ -113,8 +113,8 @@ def rearrange(X): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/cnn_class2/WHERE ARE THE NOTEBOOKS.txt b/cnn_class2/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..5446ce25 --- /dev/null +++ b/cnn_class2/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,5 @@ +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. + +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. 
\ No newline at end of file diff --git a/cnn_class2/extra_reading.txt b/cnn_class2/extra_reading.txt index d68f40bb..593bc0cc 100644 --- a/cnn_class2/extra_reading.txt +++ b/cnn_class2/extra_reading.txt @@ -11,4 +11,10 @@ Deep Residual Learning for Image Recognition https://arxiv.org/abs/1512.03385 Going Deeper with Convolutions (Inception) -https://arxiv.org/abs/1409.4842 \ No newline at end of file +https://arxiv.org/abs/1409.4842 + +Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift +https://arxiv.org/abs/1502.03167 + +Deep learning improved by biological activation functions +https://arxiv.org/pdf/1804.11237.pdf \ No newline at end of file diff --git a/cnn_class2/fashion.py b/cnn_class2/fashion.py index 858be20e..f1be1654 100644 --- a/cnn_class2/fashion.py +++ b/cnn_class2/fashion.py @@ -101,8 +101,8 @@ def y2indicator(Y): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/cnn_class2/fashion2.py b/cnn_class2/fashion2.py index 4d2d22d6..d035e974 100644 --- a/cnn_class2/fashion2.py +++ b/cnn_class2/fashion2.py @@ -96,8 +96,8 @@ def y2indicator(Y): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/cnn_class2/siamese.py b/cnn_class2/siamese.py new file mode 100644 index 00000000..4c43f163 --- /dev/null +++ b/cnn_class2/siamese.py @@ -0,0 +1,443 @@ +# https://deeplearningcourses.com/c/advanced-computer-vision +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +from keras.layers import Input, Lambda, Dense, Flatten, Conv2D, BatchNormalization, Activation, MaxPooling2D +from keras.models import Model +from keras.preprocessing import image + +import keras.backend as K + +import numpy as np +import matplotlib.pyplot as plt + +from glob import glob +from collections import Counter + + +# get the data from: http://vision.ucsd.edu/content/yale-face-database +files = glob('../large_files/yalefaces/subject*') + +# easier to randomize later +np.random.shuffle(files) + +# number of samples +N = len(files) + + +def load_img(filepath): + # load image and downsample + img = image.img_to_array(image.load_img(filepath, target_size=[60, 80])).astype('uint8') + return img + + + +# look at an image for fun +img = load_img(np.random.choice(files)) +plt.imshow(img) +plt.show() + + +# try load images as arrays +# yes, I cheated and checked beforehand that all the images were the same shape! +shape = [N] + list(img.shape) +images = np.zeros(shape) +for i, f in enumerate(files): + # img = image.img_to_array(image.load_img(f)).astype('uint8') + img = load_img(f) + images[i] = img + + +# make the labels +# all the filenames are something like 'subject13.happy' +labels = np.zeros(N) +for i, f in enumerate(files): + filename = f.rsplit('/', 1)[-1] + subject_num = filename.split('.', 1)[0] + + # subtract 1 since the filenames start from 1 + idx = int(subject_num.replace('subject', '')) - 1 + labels[i] = idx + + +# how many of each subject do we have? 
+label_count = Counter(labels) + +# set of unique labels +unique_labels = set(label_count.keys()) + +# get the number of subjects +n_subjects = len(label_count) + +# let's make it so 3 images for each subject are test data +# number of test points is then +n_test = 3 * n_subjects +n_train = N - n_test + + +# initialize arrays to hold train and test images +train_images = np.zeros([n_train] + list(img.shape)) +train_labels = np.zeros(n_train) +test_images = np.zeros([n_test] + list(img.shape)) +test_labels = np.zeros(n_test) + + +count_so_far = {} +train_idx = 0 +test_idx = 0 +for img, label in zip(images, labels): + # increment the count + count_so_far[label] = count_so_far.get(label, 0) + 1 + + if count_so_far[label] > 3: + # we have already added 3 test images for this subject + # so add the rest to train + train_images[train_idx] = img + train_labels[train_idx] = label + train_idx += 1 + + else: + # add the first 3 images to test + test_images[test_idx] = img + test_labels[test_idx] = label + test_idx += 1 + + +# create label2idx mapping for easy access +train_label2idx = {} +test_label2idx = {} + +for i, label in enumerate(train_labels): + if label not in train_label2idx: + train_label2idx[label] = [i] + else: + train_label2idx[label].append(i) + +for i, label in enumerate(test_labels): + if label not in test_label2idx: + test_label2idx[label] = [i] + else: + test_label2idx[label].append(i) + + +# come up with all possible training sample indices +train_positives = [] +train_negatives = [] +test_positives = [] +test_negatives = [] + +for label, indices in train_label2idx.items(): + # all indices that do NOT belong to this subject + other_indices = set(range(n_train)) - set(indices) + + for i, idx1 in enumerate(indices): + for idx2 in indices[i+1:]: + train_positives.append((idx1, idx2)) + + for idx2 in other_indices: + train_negatives.append((idx1, idx2)) + +for label, indices in test_label2idx.items(): + # all indices that do NOT belong to this subject + other_indices = set(range(n_test)) - set(indices) + + for i, idx1 in enumerate(indices): + for idx2 in indices[i+1:]: + test_positives.append((idx1, idx2)) + + for idx2 in other_indices: + test_negatives.append((idx1, idx2)) + + +batch_size = 64 +def train_generator(): + # for each batch, we will send 1 pair of each subject + # and the same number of non-matching pairs + n_batches = int(np.ceil(len(train_positives) / batch_size)) + + while True: + np.random.shuffle(train_positives) + + n_samples = batch_size * 2 + shape = [n_samples] + list(img.shape) + x_batch_1 = np.zeros(shape) + x_batch_2 = np.zeros(shape) + y_batch = np.zeros(n_samples) + + for i in range(n_batches): + pos_batch_indices = train_positives[i * batch_size: (i + 1) * batch_size] + + # fill up x_batch and y_batch + j = 0 + for idx1, idx2 in pos_batch_indices: + x_batch_1[j] = train_images[idx1] + x_batch_2[j] = train_images[idx2] + y_batch[j] = 1 # match + j += 1 + + # get negative samples + neg_indices = np.random.choice(len(train_negatives), size=len(pos_batch_indices), replace=False) + for neg in neg_indices: + idx1, idx2 = train_negatives[neg] + x_batch_1[j] = train_images[idx1] + x_batch_2[j] = train_images[idx2] + y_batch[j] = 0 # non-match + j += 1 + + x1 = x_batch_1[:j] + x2 = x_batch_2[:j] + y = y_batch[:j] + yield [x1, x2], y + + +# same thing as the train generator except no shuffling and it uses the test set +def test_generator(): + n_batches = int(np.ceil(len(test_positives) / batch_size)) + + while True: + n_samples = batch_size * 2 + shape = [n_samples] 
+ list(img.shape) + x_batch_1 = np.zeros(shape) + x_batch_2 = np.zeros(shape) + y_batch = np.zeros(n_samples) + + for i in range(n_batches): + pos_batch_indices = test_positives[i * batch_size: (i + 1) * batch_size] + + # fill up x_batch and y_batch + j = 0 + for idx1, idx2 in pos_batch_indices: + x_batch_1[j] = test_images[idx1] + x_batch_2[j] = test_images[idx2] + y_batch[j] = 1 # match + j += 1 + + # get negative samples + neg_indices = np.random.choice(len(test_negatives), size=len(pos_batch_indices), replace=False) + for neg in neg_indices: + idx1, idx2 = test_negatives[neg] + x_batch_1[j] = test_images[idx1] + x_batch_2[j] = test_images[idx2] + y_batch[j] = 0 # non-match + j += 1 + + x1 = x_batch_1[:j] + x2 = x_batch_2[:j] + y = y_batch[:j] + yield [x1, x2], y + + + + +# build the base neural network +i = Input(shape=img.shape) +x = Conv2D(filters=32, kernel_size=(3, 3))(i) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Conv2D(filters=64, kernel_size=(3, 3))(x) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Flatten()(x) +x = Dense(units=128, activation='relu')(x) +x = Dense(units=50)(x) # feature vector + +cnn = Model(inputs=i, outputs=x) + + +# feed both images into the same CNN +img_placeholder1 = Input(shape=img.shape) +img_placeholder2 = Input(shape=img.shape) + +# get image features +feat1 = cnn(img_placeholder1) +feat2 = cnn(img_placeholder2) + + +# calculate the Euclidean distance between feature 1 and feature 2 +def euclidean_distance(features): + x, y = features + return K.sqrt(K.sum(K.square(x - y), axis=1, keepdims=True)) + + +# lambda layer to output distance between feat1 and feat2 +dist_layer = Lambda(euclidean_distance)([feat1, feat2]) + + +# the model we will actually train +model = Model(inputs=[img_placeholder1, img_placeholder2], outputs=dist_layer) + + +# loss function for siamese network +def contrastive_loss(y_true, y_pred): + margin = 1 + return K.mean(y_true * K.square(y_pred) + (1 - y_true) * K.square(K.maximum(margin - y_pred, 0))) + + +# compile the model +model.compile( + loss=contrastive_loss, + optimizer='adam', +) + + +# calculate accuracy before training +# since the dataset is imbalanced, we'll report tp, tn, fp, fn +def get_train_accuracy(threshold=0.85): + positive_distances = [] + negative_distances = [] + + tp = 0 + tn = 0 + fp = 0 + fn = 0 + + batch_size = 64 + x_batch_1 = np.zeros([batch_size] + list(img.shape)) + x_batch_2 = np.zeros([batch_size] + list(img.shape)) + n_batches = int(np.ceil(len(train_positives) / batch_size)) + for i in range(n_batches): + print(f"pos batch: {i+1}/{n_batches}") + pos_batch_indices = train_positives[i * batch_size: (i + 1) * batch_size] + + # fill up x_batch and y_batch + j = 0 + for idx1, idx2 in pos_batch_indices: + x_batch_1[j] = train_images[idx1] + x_batch_2[j] = train_images[idx2] + j += 1 + + x1 = x_batch_1[:j] + x2 = x_batch_2[:j] + distances = model.predict([x1, x2]).flatten() + positive_distances += distances.tolist() + + # update tp, tn, fp, fn + tp += (distances < threshold).sum() + fn += (distances > threshold).sum() + + n_batches = int(np.ceil(len(train_negatives) / batch_size)) + for i in range(n_batches): + print(f"neg batch: {i+1}/{n_batches}") + neg_batch_indices = train_negatives[i * batch_size: (i + 1) * batch_size] + + # fill up x_batch and y_batch + j = 0 + for idx1, idx2 in neg_batch_indices: + x_batch_1[j] = train_images[idx1] + x_batch_2[j] = train_images[idx2] + j += 1 + + x1 = x_batch_1[:j] + x2 = 
x_batch_2[:j] + distances = model.predict([x1, x2]).flatten() + negative_distances += distances.tolist() + + # update tp, tn, fp, fn + fp += (distances < threshold).sum() + tn += (distances > threshold).sum() + + tpr = tp / (tp + fn) + tnr = tn / (tn + fp) + print(f"sensitivity (tpr): {tpr}, specificity (tnr): {tnr}") + + plt.hist(negative_distances, bins=20, density=True, label='negative_distances') + plt.hist(positive_distances, bins=20, density=True, label='positive_distances') + plt.legend() + plt.show() + + + +def get_test_accuracy(threshold=0.85): + positive_distances = [] + negative_distances = [] + + tp = 0 + tn = 0 + fp = 0 + fn = 0 + + batch_size = 64 + x_batch_1 = np.zeros([batch_size] + list(img.shape)) + x_batch_2 = np.zeros([batch_size] + list(img.shape)) + n_batches = int(np.ceil(len(test_positives) / batch_size)) + for i in range(n_batches): + print(f"pos batch: {i+1}/{n_batches}") + pos_batch_indices = test_positives[i * batch_size: (i + 1) * batch_size] + + # fill up x_batch and y_batch + j = 0 + for idx1, idx2 in pos_batch_indices: + x_batch_1[j] = test_images[idx1] + x_batch_2[j] = test_images[idx2] + j += 1 + + x1 = x_batch_1[:j] + x2 = x_batch_2[:j] + distances = model.predict([x1, x2]).flatten() + positive_distances += distances.tolist() + + # update tp, tn, fp, fn + tp += (distances < threshold).sum() + fn += (distances > threshold).sum() + + n_batches = int(np.ceil(len(test_negatives) / batch_size)) + for i in range(n_batches): + print(f"neg batch: {i+1}/{n_batches}") + neg_batch_indices = test_negatives[i * batch_size: (i + 1) * batch_size] + + # fill up x_batch and y_batch + j = 0 + for idx1, idx2 in neg_batch_indices: + x_batch_1[j] = test_images[idx1] + x_batch_2[j] = test_images[idx2] + j += 1 + + x1 = x_batch_1[:j] + x2 = x_batch_2[:j] + distances = model.predict([x1, x2]).flatten() + negative_distances += distances.tolist() + + # update tp, tn, fp, fn + fp += (distances < threshold).sum() + tn += (distances > threshold).sum() + + + tpr = tp / (tp + fn) + tnr = tn / (tn + fp) + print(f"sensitivity (tpr): {tpr}, specificity (tnr): {tnr}") + + plt.hist(negative_distances, bins=20, density=True, label='negative_distances') + plt.hist(positive_distances, bins=20, density=True, label='positive_distances') + plt.legend() + plt.show() + + + + +# params for training +train_steps = int(np.ceil(len(train_positives) * 2 / batch_size)) +valid_steps = int(np.ceil(len(test_positives) * 2 / batch_size)) + +# fit the model +r = model.fit( + train_generator(), + steps_per_epoch=train_steps, + epochs=20, + validation_data=test_generator(), + validation_steps=valid_steps, +) + +# plot the loss +plt.plot(r.history['loss'], label='train loss') +plt.plot(r.history['val_loss'], label='val loss') +plt.legend() +plt.show() + +get_train_accuracy() +get_test_accuracy() diff --git a/cnn_class2/style_transfer1.py b/cnn_class2/style_transfer1.py index 46d12d3d..421a0a19 100644 --- a/cnn_class2/style_transfer1.py +++ b/cnn_class2/style_transfer1.py @@ -24,21 +24,34 @@ from scipy.optimize import fmin_l_bfgs_b +import tensorflow as tf +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + def VGG16_AvgPool(shape): # we want to account for features across the entire image # so get rid of the maxpool which throws away information vgg = VGG16(input_shape=shape, weights='imagenet', include_top=False) - new_model = Sequential() + # new_model = Sequential() + # for layer in vgg.layers: + # if layer.__class__ == MaxPooling2D: + # # replace it with average pooling + # 
new_model.add(AveragePooling2D()) + # else: + # new_model.add(layer) + + i = vgg.input + x = i for layer in vgg.layers: if layer.__class__ == MaxPooling2D: # replace it with average pooling - new_model.add(AveragePooling2D()) + x = AveragePooling2D()(x) else: - new_model.add(layer) + x = layer(x) - return new_model + return Model(i, x) def VGG16_AvgPool_CutOff(shape, num_convs): # there are 13 convolutions in total @@ -50,16 +63,25 @@ def VGG16_AvgPool_CutOff(shape, num_convs): return None model = VGG16_AvgPool(shape) - new_model = Sequential() + # new_model = Sequential() + # n = 0 + # for layer in model.layers: + # if layer.__class__ == Conv2D: + # n += 1 + # new_model.add(layer) + # if n >= num_convs: + # break + n = 0 + output = None for layer in model.layers: if layer.__class__ == Conv2D: n += 1 - new_model.add(layer) if n >= num_convs: + output = layer.output break - return new_model + return Model(model.input, output) def unpreprocess(img): diff --git a/cnn_class2/use_pretrained_weights_resnet.py b/cnn_class2/use_pretrained_weights_resnet.py index 48c6bc23..8f3aae71 100644 --- a/cnn_class2/use_pretrained_weights_resnet.py +++ b/cnn_class2/use_pretrained_weights_resnet.py @@ -8,7 +8,7 @@ from keras.layers import Input, Lambda, Dense, Flatten from keras.models import Model -from keras.applications.resnet50 import ResNet50, preprocess_input +from keras.applications.resnet import ResNet50, preprocess_input # from keras.applications.inception_v3 import InceptionV3, preprocess_input from keras.preprocessing import image from keras.preprocessing.image import ImageDataGenerator @@ -21,21 +21,21 @@ # re-size all the images to this -IMAGE_SIZE = [224, 224] # feel free to change depending on dataset +IMAGE_SIZE = [100, 100] # feel free to change depending on dataset # training config: epochs = 16 batch_size = 32 # https://www.kaggle.com/paultimothymooney/blood-cells -train_path = '../large_files/blood_cell_images/TRAIN' -valid_path = '../large_files/blood_cell_images/TEST' +# train_path = '../large_files/blood_cell_images/TRAIN' +# valid_path = '../large_files/blood_cell_images/TEST' # https://www.kaggle.com/moltean/fruits # train_path = '../large_files/fruits-360/Training' # valid_path = '../large_files/fruits-360/Validation' -# train_path = '../large_files/fruits-360-small/Training' -# valid_path = '../large_files/fruits-360-small/Validation' +train_path = '../large_files/fruits-360-small/Training' +valid_path = '../large_files/fruits-360-small/Validation' # useful for getting number of files image_files = glob(train_path + '/*/*.jp*g') @@ -125,7 +125,7 @@ # fit the model -r = model.fit_generator( +r = model.fit( train_generator, validation_data=valid_generator, epochs=epochs, @@ -173,8 +173,8 @@ def get_confusion_matrix(data_path, N): plt.show() # accuracies -plt.plot(r.history['acc'], label='train acc') -plt.plot(r.history['val_acc'], label='val acc') +plt.plot(r.history['accuracy'], label='train acc') +plt.plot(r.history['val_accuracy'], label='val acc') plt.legend() plt.show() diff --git a/cnn_class2/use_pretrained_weights_vgg.py b/cnn_class2/use_pretrained_weights_vgg.py index 01cbd619..849dd9f6 100644 --- a/cnn_class2/use_pretrained_weights_vgg.py +++ b/cnn_class2/use_pretrained_weights_vgg.py @@ -31,10 +31,10 @@ # valid_path = '../large_files/blood_cell_images/TEST' # https://www.kaggle.com/moltean/fruits -# train_path = '../large_files/fruits-360/Training' -# valid_path = '../large_files/fruits-360/Validation' -train_path = '../large_files/fruits-360-small/Training' -valid_path = 
'../large_files/fruits-360-small/Validation' +train_path = '../large_files/fruits-360/Training' +valid_path = '../large_files/fruits-360/Validation' +# train_path = '../large_files/fruits-360-small/Training' +# valid_path = '../large_files/fruits-360-small/Validation' # useful for getting number of files image_files = glob(train_path + '/*/*.jp*g') @@ -45,7 +45,7 @@ # look at an image for fun -plt.imshow(image.load_img(np.random.choice(image_files))) +plt.imshow(image.img_to_array(image.load_img(np.random.choice(image_files))).astype('uint8')) plt.show() @@ -76,7 +76,6 @@ ) - # create an instance of ImageDataGenerator gen = ImageDataGenerator( rotation_range=20, @@ -124,7 +123,7 @@ # fit the model -r = model.fit_generator( +r = model.fit( train_generator, validation_data=valid_generator, epochs=epochs, @@ -172,8 +171,8 @@ def get_confusion_matrix(data_path, N): plt.show() # accuracies -plt.plot(r.history['acc'], label='train acc') -plt.plot(r.history['val_acc'], label='val acc') +plt.plot(r.history['accuracy'], label='train acc') +plt.plot(r.history['val_accuracy'], label='val acc') plt.legend() plt.show() diff --git a/financial_engineering/go_here_instead.txt b/financial_engineering/go_here_instead.txt new file mode 100644 index 00000000..614b0afd --- /dev/null +++ b/financial_engineering/go_here_instead.txt @@ -0,0 +1 @@ +https://github.com/lazyprogrammer/financial_engineering diff --git a/hmm_class/extra_reading.txt b/hmm_class/extra_reading.txt new file mode 100644 index 00000000..c0bb93d1 --- /dev/null +++ b/hmm_class/extra_reading.txt @@ -0,0 +1,8 @@ +A Tutorial on Hidden Markov Models and Selected Applications in Speech Recognition +https://www.ece.ucsb.edu/Faculty/Rabiner/ece259/Reprints/tutorial%20on%20hmm%20and%20applications.pdf + +Some Mathematics for HMM by Dawei Shen +https://pdfs.semanticscholar.org/4ce1/9ab0e07da9aa10be1c336400c8e4d8fc36c5.pdf + +A Revealing Introduction to Hidden Markov Models +https://www.cs.sjsu.edu/~stamp/RUA/HMM.pdf \ No newline at end of file diff --git a/hmm_class/hmmc_theano.py b/hmm_class/hmmc_theano.py index a68787cb..57d96ff1 100644 --- a/hmm_class/hmmc_theano.py +++ b/hmm_class/hmmc_theano.py @@ -28,8 +28,7 @@ def __init__(self, M, K): self.K = K # number of Gaussians def fit(self, X, learning_rate=1e-2, max_iter=10): - # train the HMM model using the Baum-Welch algorithm - # a specific instance of the expectation-maximization algorithm + # train the HMM model using gradient descent N = len(X) D = X[0].shape[1] # assume each x is organized (T, D) diff --git a/hmm_class/hmmc_theano2.py b/hmm_class/hmmc_theano2.py index 865e89b0..647c8173 100644 --- a/hmm_class/hmmc_theano2.py +++ b/hmm_class/hmmc_theano2.py @@ -30,8 +30,7 @@ def __init__(self, M, K): self.K = K # number of Gaussians def fit(self, X, learning_rate=1e-2, max_iter=10): - # train the HMM model using the Baum-Welch algorithm - # a specific instance of the expectation-maximization algorithm + # train the HMM model using gradient descent N = len(X) D = X[0].shape[1] # assume each x is organized (T, D) diff --git a/hmm_class/sites.py b/hmm_class/sites.py index 617863f5..0187e03a 100644 --- a/hmm_class/sites.py +++ b/hmm_class/sites.py @@ -2,6 +2,8 @@ # https://udemy.com/unsupervised-machine-learning-hidden-markov-models-in-python # http://lazyprogrammer.me # Create a Markov model for site data. +from __future__ import print_function, division +from future.utils import iteritems import numpy as np transitions = {} @@ -14,19 +16,19 @@ row_sums[s] = row_sums.get(s, 0.) 
+ 1 # normalize -for k, v in transitions.iteritems(): +for k, v in iteritems(transitions): s, e = k transitions[k] = v / row_sums[s] # initial state distribution -print "initial state distribution:" -for k, v in transitions.iteritems(): +print("initial state distribution:") +for k, v in iteritems(transitions): s, e = k if s == '-1': - print e, v + print(e, v) # which page has the highest bounce? -for k, v in transitions.iteritems(): +for k, v in iteritems(transitions): s, e = k if e == 'B': - print "bounce rate for %s: %s" % (s, v) + print("bounce rate for %s: %s" % (s, v)) diff --git a/kerascv/extra_reading.txt b/kerascv/extra_reading.txt new file mode 100644 index 00000000..57ebec98 --- /dev/null +++ b/kerascv/extra_reading.txt @@ -0,0 +1,8 @@ +KerasCV List of Models +https://keras.io/api/keras_cv/models/ + +Fast R-CNN (Ross Girshick) +https://arxiv.org/pdf/1504.08083.pdf + +Focal Loss for Dense Object Detection (Lin et al.) +https://arxiv.org/abs/1708.02002 \ No newline at end of file diff --git a/kerascv/imagenet_label_names.json b/kerascv/imagenet_label_names.json new file mode 100644 index 00000000..37eeb166 --- /dev/null +++ b/kerascv/imagenet_label_names.json @@ -0,0 +1,1000 @@ +["tench", +"goldfish", +"great white shark", +"tiger shark", +"hammerhead shark", +"electric ray", +"stingray", +"cock", +"hen", +"ostrich", +"brambling", +"goldfinch", +"house finch", +"junco", +"indigo bunting", +"American robin", +"bulbul", +"jay", +"magpie", +"chickadee", +"American dipper", +"kite", +"bald eagle", +"vulture", +"great grey owl", +"fire salamander", +"smooth newt", +"newt", +"spotted salamander", +"axolotl", +"American bullfrog", +"tree frog", +"tailed frog", +"loggerhead sea turtle", +"leatherback sea turtle", +"mud turtle", +"terrapin", +"box turtle", +"banded gecko", +"green iguana", +"Carolina anole", +"desert grassland whiptail lizard", +"agama", +"frilled-necked lizard", +"alligator lizard", +"Gila monster", +"European green lizard", +"chameleon", +"Komodo dragon", +"Nile crocodile", +"American alligator", +"triceratops", +"worm snake", +"ring-necked snake", +"eastern hog-nosed snake", +"smooth green snake", +"kingsnake", +"garter snake", +"water snake", +"vine snake", +"night snake", +"boa constrictor", +"African rock python", +"Indian cobra", +"green mamba", +"sea snake", +"Saharan horned viper", +"eastern diamondback rattlesnake", +"sidewinder", +"trilobite", +"harvestman", +"scorpion", +"yellow garden spider", +"barn spider", +"European garden spider", +"southern black widow", +"tarantula", +"wolf spider", +"tick", +"centipede", +"black grouse", +"ptarmigan", +"ruffed grouse", +"prairie grouse", +"peacock", +"quail", +"partridge", +"grey parrot", +"macaw", +"sulphur-crested cockatoo", +"lorikeet", +"coucal", +"bee eater", +"hornbill", +"hummingbird", +"jacamar", +"toucan", +"duck", +"red-breasted merganser", +"goose", +"black swan", +"tusker", +"echidna", +"platypus", +"wallaby", +"koala", +"wombat", +"jellyfish", +"sea anemone", +"brain coral", +"flatworm", +"nematode", +"conch", +"snail", +"slug", +"sea slug", +"chiton", +"chambered nautilus", +"Dungeness crab", +"rock crab", +"fiddler crab", +"red king crab", +"American lobster", +"spiny lobster", +"crayfish", +"hermit crab", +"isopod", +"white stork", +"black stork", +"spoonbill", +"flamingo", +"little blue heron", +"great egret", +"bittern", +"crane (bird)", +"limpkin", +"common gallinule", +"American coot", +"bustard", +"ruddy turnstone", +"dunlin", +"common redshank", +"dowitcher", +"oystercatcher", +"pelican", +"king 
penguin", +"albatross", +"grey whale", +"killer whale", +"dugong", +"sea lion", +"Chihuahua", +"Japanese Chin", +"Maltese", +"Pekingese", +"Shih Tzu", +"King Charles Spaniel", +"Papillon", +"toy terrier", +"Rhodesian Ridgeback", +"Afghan Hound", +"Basset Hound", +"Beagle", +"Bloodhound", +"Bluetick Coonhound", +"Black and Tan Coonhound", +"Treeing Walker Coonhound", +"English foxhound", +"Redbone Coonhound", +"borzoi", +"Irish Wolfhound", +"Italian Greyhound", +"Whippet", +"Ibizan Hound", +"Norwegian Elkhound", +"Otterhound", +"Saluki", +"Scottish Deerhound", +"Weimaraner", +"Staffordshire Bull Terrier", +"American Staffordshire Terrier", +"Bedlington Terrier", +"Border Terrier", +"Kerry Blue Terrier", +"Irish Terrier", +"Norfolk Terrier", +"Norwich Terrier", +"Yorkshire Terrier", +"Wire Fox Terrier", +"Lakeland Terrier", +"Sealyham Terrier", +"Airedale Terrier", +"Cairn Terrier", +"Australian Terrier", +"Dandie Dinmont Terrier", +"Boston Terrier", +"Miniature Schnauzer", +"Giant Schnauzer", +"Standard Schnauzer", +"Scottish Terrier", +"Tibetan Terrier", +"Australian Silky Terrier", +"Soft-coated Wheaten Terrier", +"West Highland White Terrier", +"Lhasa Apso", +"Flat-Coated Retriever", +"Curly-coated Retriever", +"Golden Retriever", +"Labrador Retriever", +"Chesapeake Bay Retriever", +"German Shorthaired Pointer", +"Vizsla", +"English Setter", +"Irish Setter", +"Gordon Setter", +"Brittany", +"Clumber Spaniel", +"English Springer Spaniel", +"Welsh Springer Spaniel", +"Cocker Spaniels", +"Sussex Spaniel", +"Irish Water Spaniel", +"Kuvasz", +"Schipperke", +"Groenendael", +"Malinois", +"Briard", +"Australian Kelpie", +"Komondor", +"Old English Sheepdog", +"Shetland Sheepdog", +"collie", +"Border Collie", +"Bouvier des Flandres", +"Rottweiler", +"German Shepherd Dog", +"Dobermann", +"Miniature Pinscher", +"Greater Swiss Mountain Dog", +"Bernese Mountain Dog", +"Appenzeller Sennenhund", +"Entlebucher Sennenhund", +"Boxer", +"Bullmastiff", +"Tibetan Mastiff", +"French Bulldog", +"Great Dane", +"St. 
Bernard", +"husky", +"Alaskan Malamute", +"Siberian Husky", +"Dalmatian", +"Affenpinscher", +"Basenji", +"pug", +"Leonberger", +"Newfoundland", +"Pyrenean Mountain Dog", +"Samoyed", +"Pomeranian", +"Chow Chow", +"Keeshond", +"Griffon Bruxellois", +"Pembroke Welsh Corgi", +"Cardigan Welsh Corgi", +"Toy Poodle", +"Miniature Poodle", +"Standard Poodle", +"Mexican hairless dog", +"grey wolf", +"Alaskan tundra wolf", +"red wolf", +"coyote", +"dingo", +"dhole", +"African wild dog", +"hyena", +"red fox", +"kit fox", +"Arctic fox", +"grey fox", +"tabby cat", +"tiger cat", +"Persian cat", +"Siamese cat", +"Egyptian Mau", +"cougar", +"lynx", +"leopard", +"snow leopard", +"jaguar", +"lion", +"tiger", +"cheetah", +"brown bear", +"American black bear", +"polar bear", +"sloth bear", +"mongoose", +"meerkat", +"tiger beetle", +"ladybug", +"ground beetle", +"longhorn beetle", +"leaf beetle", +"dung beetle", +"rhinoceros beetle", +"weevil", +"fly", +"bee", +"ant", +"grasshopper", +"cricket", +"stick insect", +"cockroach", +"mantis", +"cicada", +"leafhopper", +"lacewing", +"dragonfly", +"damselfly", +"red admiral", +"ringlet", +"monarch butterfly", +"small white", +"sulphur butterfly", +"gossamer-winged butterfly", +"starfish", +"sea urchin", +"sea cucumber", +"cottontail rabbit", +"hare", +"Angora rabbit", +"hamster", +"porcupine", +"fox squirrel", +"marmot", +"beaver", +"guinea pig", +"common sorrel", +"zebra", +"pig", +"wild boar", +"warthog", +"hippopotamus", +"ox", +"water buffalo", +"bison", +"ram", +"bighorn sheep", +"Alpine ibex", +"hartebeest", +"impala", +"gazelle", +"dromedary", +"llama", +"weasel", +"mink", +"European polecat", +"black-footed ferret", +"otter", +"skunk", +"badger", +"armadillo", +"three-toed sloth", +"orangutan", +"gorilla", +"chimpanzee", +"gibbon", +"siamang", +"guenon", +"patas monkey", +"baboon", +"macaque", +"langur", +"black-and-white colobus", +"proboscis monkey", +"marmoset", +"white-headed capuchin", +"howler monkey", +"titi", +"Geoffroy's spider monkey", +"common squirrel monkey", +"ring-tailed lemur", +"indri", +"Asian elephant", +"African bush elephant", +"red panda", +"giant panda", +"snoek", +"eel", +"coho salmon", +"rock beauty", +"clownfish", +"sturgeon", +"garfish", +"lionfish", +"pufferfish", +"abacus", +"abaya", +"academic gown", +"accordion", +"acoustic guitar", +"aircraft carrier", +"airliner", +"airship", +"altar", +"ambulance", +"amphibious vehicle", +"analog clock", +"apiary", +"apron", +"waste container", +"assault rifle", +"backpack", +"bakery", +"balance beam", +"balloon", +"ballpoint pen", +"Band-Aid", +"banjo", +"baluster", +"barbell", +"barber chair", +"barbershop", +"barn", +"barometer", +"barrel", +"wheelbarrow", +"baseball", +"basketball", +"bassinet", +"bassoon", +"swimming cap", +"bath towel", +"bathtub", +"station wagon", +"lighthouse", +"beaker", +"military cap", +"beer bottle", +"beer glass", +"bell-cot", +"bib", +"tandem bicycle", +"bikini", +"ring binder", +"binoculars", +"birdhouse", +"boathouse", +"bobsleigh", +"bolo tie", +"poke bonnet", +"bookcase", +"bookstore", +"bottle cap", +"bow", +"bow tie", +"brass", +"bra", +"breakwater", +"breastplate", +"broom", +"bucket", +"buckle", +"bulletproof vest", +"high-speed train", +"butcher shop", +"taxicab", +"cauldron", +"candle", +"cannon", +"canoe", +"can opener", +"cardigan", +"car mirror", +"carousel", +"tool kit", +"carton", +"car wheel", +"automated teller machine", +"cassette", +"cassette player", +"castle", +"catamaran", +"CD player", +"cello", +"mobile phone", +"chain", +"chain-link 
fence", +"chain mail", +"chainsaw", +"chest", +"chiffonier", +"chime", +"china cabinet", +"Christmas stocking", +"church", +"movie theater", +"cleaver", +"cliff dwelling", +"cloak", +"clogs", +"cocktail shaker", +"coffee mug", +"coffeemaker", +"coil", +"combination lock", +"computer keyboard", +"confectionery store", +"container ship", +"convertible", +"corkscrew", +"cornet", +"cowboy boot", +"cowboy hat", +"cradle", +"crane (machine)", +"crash helmet", +"crate", +"infant bed", +"Crock Pot", +"croquet ball", +"crutch", +"cuirass", +"dam", +"desk", +"desktop computer", +"rotary dial telephone", +"diaper", +"digital clock", +"digital watch", +"dining table", +"dishcloth", +"dishwasher", +"disc brake", +"dock", +"dog sled", +"dome", +"doormat", +"drilling rig", +"drum", +"drumstick", +"dumbbell", +"Dutch oven", +"electric fan", +"electric guitar", +"electric locomotive", +"entertainment center", +"envelope", +"espresso machine", +"face powder", +"feather boa", +"filing cabinet", +"fireboat", +"fire engine", +"fire screen sheet", +"flagpole", +"flute", +"folding chair", +"football helmet", +"forklift", +"fountain", +"fountain pen", +"four-poster bed", +"freight car", +"French horn", +"frying pan", +"fur coat", +"garbage truck", +"gas mask", +"gas pump", +"goblet", +"go-kart", +"golf ball", +"golf cart", +"gondola", +"gong", +"gown", +"grand piano", +"greenhouse", +"grille", +"grocery store", +"guillotine", +"barrette", +"hair spray", +"half-track", +"hammer", +"hamper", +"hair dryer", +"hand-held computer", +"handkerchief", +"hard disk drive", +"harmonica", +"harp", +"harvester", +"hatchet", +"holster", +"home theater", +"honeycomb", +"hook", +"hoop skirt", +"horizontal bar", +"horse-drawn vehicle", +"hourglass", +"iPod", +"clothes iron", +"jack-o'-lantern", +"jeans", +"jeep", +"T-shirt", +"jigsaw puzzle", +"pulled rickshaw", +"joystick", +"kimono", +"knee pad", +"knot", +"lab coat", +"ladle", +"lampshade", +"laptop computer", +"lawn mower", +"lens cap", +"paper knife", +"library", +"lifeboat", +"lighter", +"limousine", +"ocean liner", +"lipstick", +"slip-on shoe", +"lotion", +"speaker", +"loupe", +"sawmill", +"magnetic compass", +"mail bag", +"mailbox", +"tights", +"tank suit", +"manhole cover", +"maraca", +"marimba", +"mask", +"match", +"maypole", +"maze", +"measuring cup", +"medicine chest", +"megalith", +"microphone", +"microwave oven", +"military uniform", +"milk can", +"minibus", +"miniskirt", +"minivan", +"missile", +"mitten", +"mixing bowl", +"mobile home", +"Model T", +"modem", +"monastery", +"monitor", +"moped", +"mortar", +"square academic cap", +"mosque", +"mosquito net", +"scooter", +"mountain bike", +"tent", +"computer mouse", +"mousetrap", +"moving van", +"muzzle", +"nail", +"neck brace", +"necklace", +"nipple", +"notebook computer", +"obelisk", +"oboe", +"ocarina", +"odometer", +"oil filter", +"organ", +"oscilloscope", +"overskirt", +"bullock cart", +"oxygen mask", +"packet", +"paddle", +"paddle wheel", +"padlock", +"paintbrush", +"pajamas", +"palace", +"pan flute", +"paper towel", +"parachute", +"parallel bars", +"park bench", +"parking meter", +"passenger car", +"patio", +"payphone", +"pedestal", +"pencil case", +"pencil sharpener", +"perfume", +"Petri dish", +"photocopier", +"plectrum", +"Pickelhaube", +"picket fence", +"pickup truck", +"pier", +"piggy bank", +"pill bottle", +"pillow", +"ping-pong ball", +"pinwheel", +"pirate ship", +"pitcher", +"hand plane", +"planetarium", +"plastic bag", +"plate rack", +"plow", +"plunger", +"Polaroid camera", +"pole", +"police van", 
+"poncho", +"billiard table", +"soda bottle", +"pot", +"potter's wheel", +"power drill", +"prayer rug", +"printer", +"prison", +"projectile", +"projector", +"hockey puck", +"punching bag", +"purse", +"quill", +"quilt", +"race car", +"racket", +"radiator", +"radio", +"radio telescope", +"rain barrel", +"recreational vehicle", +"reel", +"reflex camera", +"refrigerator", +"remote control", +"restaurant", +"revolver", +"rifle", +"rocking chair", +"rotisserie", +"eraser", +"rugby ball", +"ruler", +"running shoe", +"safe", +"safety pin", +"salt shaker", +"sandal", +"sarong", +"saxophone", +"scabbard", +"weighing scale", +"school bus", +"schooner", +"scoreboard", +"CRT screen", +"screw", +"screwdriver", +"seat belt", +"sewing machine", +"shield", +"shoe store", +"shoji", +"shopping basket", +"shopping cart", +"shovel", +"shower cap", +"shower curtain", +"ski", +"ski mask", +"sleeping bag", +"slide rule", +"sliding door", +"slot machine", +"snorkel", +"snowmobile", +"snowplow", +"soap dispenser", +"soccer ball", +"sock", +"solar thermal collector", +"sombrero", +"soup bowl", +"space bar", +"space heater", +"space shuttle", +"spatula", +"motorboat", +"spider web", +"spindle", +"sports car", +"spotlight", +"stage", +"steam locomotive", +"through arch bridge", +"steel drum", +"stethoscope", +"scarf", +"stone wall", +"stopwatch", +"stove", +"strainer", +"tram", +"stretcher", +"couch", +"stupa", +"submarine", +"suit", +"sundial", +"sunglass", +"sunglasses", +"sunscreen", +"suspension bridge", +"mop", +"sweatshirt", +"swimsuit", +"swing", +"switch", +"syringe", +"table lamp", +"tank", +"tape player", +"teapot", +"teddy bear", +"television", +"tennis ball", +"thatched roof", +"front curtain", +"thimble", +"threshing machine", +"throne", +"tile roof", +"toaster", +"tobacco shop", +"toilet seat", +"torch", +"totem pole", +"tow truck", +"toy store", +"tractor", +"semi-trailer truck", +"tray", +"trench coat", +"tricycle", +"trimaran", +"tripod", +"triumphal arch", +"trolleybus", +"trombone", +"tub", +"turnstile", +"typewriter keyboard", +"umbrella", +"unicycle", +"upright piano", +"vacuum cleaner", +"vase", +"vault", +"velvet", +"vending machine", +"vestment", +"viaduct", +"violin", +"volleyball", +"waffle iron", +"wall clock", +"wallet", +"wardrobe", +"military aircraft", +"sink", +"washing machine", +"water bottle", +"water jug", +"water tower", +"whiskey jug", +"whistle", +"wig", +"window screen", +"window shade", +"Windsor tie", +"wine bottle", +"wing", +"wok", +"wooden spoon", +"wool", +"split-rail fence", +"shipwreck", +"yawl", +"yurt", +"website", +"comic book", +"crossword", +"traffic sign", +"traffic light", +"dust jacket", +"menu", +"plate", +"guacamole", +"consomme", +"hot pot", +"trifle", +"ice cream", +"ice pop", +"baguette", +"bagel", +"pretzel", +"cheeseburger", +"hot dog", +"mashed potato", +"cabbage", +"broccoli", +"cauliflower", +"zucchini", +"spaghetti squash", +"acorn squash", +"butternut squash", +"cucumber", +"artichoke", +"bell pepper", +"cardoon", +"mushroom", +"Granny Smith", +"strawberry", +"orange", +"lemon", +"fig", +"pineapple", +"banana", +"jackfruit", +"custard apple", +"pomegranate", +"hay", +"carbonara", +"chocolate syrup", +"dough", +"meatloaf", +"pizza", +"pot pie", +"burrito", +"red wine", +"espresso", +"cup", +"eggnog", +"alp", +"bubble", +"cliff", +"coral reef", +"geyser", +"lakeshore", +"promontory", +"shoal", +"seashore", +"valley", +"volcano", +"baseball player", +"bridegroom", +"scuba diver", +"rapeseed", +"daisy", +"yellow lady's slipper", +"corn", +"acorn", +"rose 
hip", +"horse chestnut seed", +"coral fungus", +"agaric", +"gyromitra", +"stinkhorn mushroom", +"earth star", +"hen-of-the-woods", +"bolete", +"ear", +"toilet paper"] diff --git a/kerascv/makelist.py b/kerascv/makelist.py new file mode 100644 index 00000000..8498fa24 --- /dev/null +++ b/kerascv/makelist.py @@ -0,0 +1,10 @@ +''' +Use this script to generate a list of all XML files in a folder. +''' + +from glob import glob + +files = glob('*.xml') +with open('xml_list.txt', 'w') as f: + for fn in files: + f.write("%s\n" % fn) \ No newline at end of file diff --git a/kerascv/pascal2coco.py b/kerascv/pascal2coco.py new file mode 100644 index 00000000..3ffbd3b8 --- /dev/null +++ b/kerascv/pascal2coco.py @@ -0,0 +1,152 @@ +# adapted from https://blog.roboflow.com/how-to-convert-annotations-from-voc-xml-to-coco-json/ + +import os +import argparse +import json +import xml.etree.ElementTree as ET +from typing import Dict, List +from tqdm import tqdm +import re + + +def get_label2id(labels_path: str) -> Dict[str, int]: + """id is 1 start""" + with open(labels_path, 'r') as f: + labels_str = f.read().split() + labels_ids = list(range(0, len(labels_str))) + return dict(zip(labels_str, labels_ids)) + + +def get_annpaths(ann_dir_path: str = None, + ann_ids_path: str = None, + ext: str = '', + annpaths_list_path: str = None) -> List[str]: + # If use annotation paths list + if annpaths_list_path is not None: + with open(annpaths_list_path, 'r') as f: + ann_paths = f.read().split() + return ann_paths + + # If use annotaion ids list + ext_with_dot = '.' + ext if ext != '' else '' + with open(ann_ids_path, 'r') as f: + ann_ids = f.read().split() + ann_paths = [os.path.join(ann_dir_path, aid+ext_with_dot) for aid in ann_ids] + return ann_paths + + +def get_image_info(annotation_root, extract_num_from_imgid=True): + path = annotation_root.findtext('path') + if path is None: + filename = annotation_root.findtext('filename') + else: + filename = os.path.basename(path) + img_name = os.path.basename(filename) + img_id = os.path.splitext(img_name)[0] + if extract_num_from_imgid and isinstance(img_id, str): + img_id = int(re.findall(r'\d+', img_id)[0]) + + size = annotation_root.find('size') + width = int(size.findtext('width')) + height = int(size.findtext('height')) + + image_info = { + 'file_name': filename, + 'height': height, + 'width': width, + 'id': img_id + } + return image_info + + +def get_coco_annotation_from_obj(obj, label2id): + label = obj.findtext('name') + assert label in label2id, f"Error: {label} is not in label2id !" + category_id = label2id[label] + bndbox = obj.find('bndbox') + xmin = int(bndbox.findtext('xmin')) - 1 + ymin = int(bndbox.findtext('ymin')) - 1 + xmax = int(bndbox.findtext('xmax')) + ymax = int(bndbox.findtext('ymax')) + assert xmax > xmin and ymax > ymin, f"Box size error !: (xmin, ymin, xmax, ymax): {xmin, ymin, xmax, ymax}" + o_width = xmax - xmin + o_height = ymax - ymin + ann = { + 'area': o_width * o_height, + 'iscrowd': 0, + 'bbox': [xmin, ymin, o_width, o_height], + 'category_id': category_id, + 'ignore': 0, + 'segmentation': [] # This script is not for segmentation + } + return ann + + +def convert_xmls_to_cocojson(annotation_paths: List[str], + label2id: Dict[str, int], + output_jsonpath: str, + extract_num_from_imgid: bool = True): + output_json_dict = { + "images": [], + "type": "instances", + "annotations": [], + "categories": [] + } + bnd_id = 1 # START_BOUNDING_BOX_ID, TODO input as args ? 
+    print('Start converting!')
+    for a_path in tqdm(annotation_paths):
+        # Read annotation xml
+        ann_tree = ET.parse(a_path)
+        ann_root = ann_tree.getroot()
+
+        img_info = get_image_info(annotation_root=ann_root,
+                                  extract_num_from_imgid=extract_num_from_imgid)
+        img_id = img_info['id']
+        output_json_dict['images'].append(img_info)
+
+        for obj in ann_root.findall('object'):
+            ann = get_coco_annotation_from_obj(obj=obj, label2id=label2id)
+            ann.update({'image_id': img_id, 'id': bnd_id})
+            output_json_dict['annotations'].append(ann)
+            bnd_id = bnd_id + 1
+
+    for label, label_id in label2id.items():
+        category_info = {'supercategory': 'none', 'id': label_id, 'name': label}
+        output_json_dict['categories'].append(category_info)
+
+    with open(output_jsonpath, 'w') as f:
+        output_json = json.dumps(output_json_dict)
+        f.write(output_json)
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description='This script supports converting VOC format XMLs to COCO format JSON')
+    parser.add_argument('--ann_dir', type=str, default=None,
+                        help='path to annotation files directory. Not needed when using --ann_paths_list')
+    parser.add_argument('--ann_ids', type=str, default=None,
+                        help='path to annotation files ids list. Not needed when using --ann_paths_list')
+    parser.add_argument('--ann_paths_list', type=str, default=None,
+                        help='path to annotation paths list. Not needed when using --ann_dir and --ann_ids')
+    parser.add_argument('--labels', type=str, default=None,
+                        help='path to label list.')
+    parser.add_argument('--output', type=str, default='output.json', help='path to output json file')
+    parser.add_argument('--ext', type=str, default='', help='additional extension of annotation file')
+    args = parser.parse_args()
+    label2id = get_label2id(labels_path=args.labels)
+    ann_paths = get_annpaths(
+        ann_dir_path=args.ann_dir,
+        ann_ids_path=args.ann_ids,
+        ext=args.ext,
+        annpaths_list_path=args.ann_paths_list
+    )
+    convert_xmls_to_cocojson(
+        annotation_paths=ann_paths,
+        label2id=label2id,
+        output_jsonpath=args.output,
+        extract_num_from_imgid=True
+    )
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/linear_algebra/WHERE ARE THE NOTEBOOKS.txt b/linear_algebra/WHERE ARE THE NOTEBOOKS.txt
new file mode 100644
index 00000000..5446ce25
--- /dev/null
+++ b/linear_algebra/WHERE ARE THE NOTEBOOKS.txt
@@ -0,0 +1,5 @@
+As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github.
+
+If you missed this, please review the lecture for the actual location of the notebooks.
+
+If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com.
\ No newline at end of file
diff --git a/linear_algebra/extra_reading.txt b/linear_algebra/extra_reading.txt
new file mode 100644
index 00000000..865e98be
--- /dev/null
+++ b/linear_algebra/extra_reading.txt
@@ -0,0 +1,6 @@
+Introduction to Linear Algebra by Gilbert Strang
+https://amzn.to/2G3bvW1
+
+Still Don't Understand Gravity? 
This Will Help +- this is included not because it's about calculus, but because it's yet another educator explaining why practice is important +https://www.youtube.com/watch?v=cP2uVarXi1A \ No newline at end of file diff --git a/linear_regression_class/systolic.py b/linear_regression_class/systolic.py index b7451837..7d594670 100644 --- a/linear_regression_class/systolic.py +++ b/linear_regression_class/systolic.py @@ -19,7 +19,7 @@ import numpy as np import pandas as pd -df = pd.read_excel('mlr02.xls') +df = pd.read_excel('mlr02.xls', engine='xlrd') X = df.values # using age to predict systolic blood pressure diff --git a/logistic_regression_class/l1_regularization.py b/logistic_regression_class/l1_regularization.py index 86b79707..5bcbd809 100644 --- a/logistic_regression_class/l1_regularization.py +++ b/logistic_regression_class/l1_regularization.py @@ -9,6 +9,7 @@ import numpy as np +from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt def sigmoid(z): @@ -19,6 +20,7 @@ def sigmoid(z): # uniformly distributed numbers between -5, +5 X = (np.random.random((N, D)) - 0.5)*10 +# X = (np.random.randn(N, D) - 0.5)*10 # true weights - only the first 3 dimensions of X affect Y true_w = np.array([1, 0.5, -0.5] + [0]*(D - 3)) @@ -26,6 +28,15 @@ def sigmoid(z): # generate Y - add noise with variance 0.5 Y = np.round(sigmoid(X.dot(true_w) + np.random.randn(N)*0.5)) + + + +# let's plot the data to see what it looks like +fig = plt.figure() +ax = fig.add_subplot(111, projection='3d') +ax.scatter(X[:,0], X[:,1], X[:,2], c=Y) +plt.show() + # perform gradient descent to find w costs = [] # keep track of squared error cost w = np.random.randn(D) / np.sqrt(D) # randomly initialize w diff --git a/logistic_regression_class/logistic3.py b/logistic_regression_class/logistic3.py index 930d9d26..67fd027a 100644 --- a/logistic_regression_class/logistic3.py +++ b/logistic_regression_class/logistic3.py @@ -65,7 +65,6 @@ def cross_entropy(T, Y): print(cross_entropy(T, Y)) # gradient descent weight udpate - # w += learning_rate * np.dot((T - Y).T, Xb) # old w += learning_rate * Xb.T.dot(T - Y) # recalculate Y diff --git a/logistic_regression_class/logistic4.py b/logistic_regression_class/logistic4.py index 77d9d2cf..6fa77c36 100644 --- a/logistic_regression_class/logistic4.py +++ b/logistic_regression_class/logistic4.py @@ -64,7 +64,6 @@ def cross_entropy(T, Y): print(cross_entropy(T, Y)) # gradient descent weight udpate with regularization - # w += learning_rate * ( np.dot((T - Y).T, Xb) - 0.1*w ) # old w += learning_rate * ( Xb.T.dot(T - Y) - 0.1*w ) # recalculate Y diff --git a/logistic_regression_class/logistic_donut.py b/logistic_regression_class/logistic_donut.py index de5ec9fa..db8cc50a 100644 --- a/logistic_regression_class/logistic_donut.py +++ b/logistic_regression_class/logistic_donut.py @@ -73,7 +73,6 @@ def cross_entropy(T, Y): print(e) # gradient descent weight udpate with regularization - # w += learning_rate * ( np.dot((T - Y).T, Xb) - 0.01*w ) # old w += learning_rate * ( Xb.T.dot(T - Y) - 0.1*w ) # recalculate Y diff --git a/matrix_calculus/extra_reading.txt b/matrix_calculus/extra_reading.txt new file mode 100644 index 00000000..a19af06d --- /dev/null +++ b/matrix_calculus/extra_reading.txt @@ -0,0 +1,2 @@ +The Matrix Cookbook +https://www.math.uwaterloo.ca/~hwolkowi/matrixcookbook.pdf \ No newline at end of file diff --git a/naive_bayes/WHERE ARE THE NOTEBOOKS.txt b/naive_bayes/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..5446ce25 --- /dev/null +++ 
b/naive_bayes/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,5 @@ +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. + +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/naive_bayes/extra_reading.txt b/naive_bayes/extra_reading.txt new file mode 100644 index 00000000..52e5228b --- /dev/null +++ b/naive_bayes/extra_reading.txt @@ -0,0 +1,8 @@ +Complement Naive Bayes +https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf + +Semi-Supervised Learning with Naive Bayes +http://www.kamalnigam.com/papers/emcat-aaai98.pdf + +An empirical study of the naive Bayes classifier +https://faculty.cc.gatech.edu/~isbell/reading/papers/Rish.pdf \ No newline at end of file diff --git a/nlp_class/cipher_placeholder.py b/nlp_class/cipher_placeholder.py new file mode 100644 index 00000000..0c08b818 --- /dev/null +++ b/nlp_class/cipher_placeholder.py @@ -0,0 +1,23 @@ +# https://deeplearningcourses.com/c/data-science-natural-language-processing-in-python +# https://www.udemy.com/data-science-natural-language-processing-in-python + +# Author: http://lazyprogrammer.me + +# Get the data from here: +# https://lazyprogrammer.me/course_files/moby_dick.txt + +### encode a message + +# this is a random excerpt from Project Gutenberg's +# The Adventures of Sherlock Holmes, by Arthur Conan Doyle +# https://www.gutenberg.org/ebooks/1661 + +original_message = '''I then lounged down the street and found, +as I expected, that there was a mews in a lane which runs down +by one wall of the garden. I lent the ostlers a hand in rubbing +down their horses, and received in exchange twopence, a glass of +half-and-half, two fills of shag tobacco, and as much information +as I could desire about Miss Adler, to say nothing of half a dozen +other people in the neighbourhood in whom I was not in the least +interested, but whose biographies I was compelled to listen to. 
+''' \ No newline at end of file diff --git a/nlp_class/sentiment.py b/nlp_class/sentiment.py index c852ac2f..e5f58477 100644 --- a/nlp_class/sentiment.py +++ b/nlp_class/sentiment.py @@ -34,22 +34,13 @@ # load the reviews # data courtesy of http://www.cs.jhu.edu/~mdredze/datasets/sentiment/index2.html -positive_reviews = BeautifulSoup(open('electronics/positive.review').read()) +positive_reviews = BeautifulSoup(open('electronics/positive.review').read(), features="html5lib") positive_reviews = positive_reviews.findAll('review_text') -negative_reviews = BeautifulSoup(open('electronics/negative.review').read()) +negative_reviews = BeautifulSoup(open('electronics/negative.review').read(), features="html5lib") negative_reviews = negative_reviews.findAll('review_text') -# there are more positive reviews than negative reviews -# so let's take a random sample so we have balanced classes -# np.random.shuffle(positive_reviews) -# positive_reviews = positive_reviews[:len(negative_reviews)] -# we can also oversample the negative reviews -diff = len(positive_reviews) - len(negative_reviews) -idxs = np.random.choice(len(negative_reviews), size=diff) -extra = [negative_reviews[i] for i in idxs] -negative_reviews += extra # first let's just try to tokenize the text using nltk's tokenizer # let's take the first review for example: diff --git a/nlp_class/spam2.py b/nlp_class/spam2.py index b5e069cc..c8ae5414 100644 --- a/nlp_class/spam2.py +++ b/nlp_class/spam2.py @@ -35,22 +35,26 @@ df['b_labels'] = df['labels'].map({'ham': 0, 'spam': 1}) Y = df['b_labels'].values +# split up the data +df_train, df_test, Ytrain, Ytest = train_test_split(df['data'], Y, test_size=0.33) + # try multiple ways of calculating features -# tfidf = TfidfVectorizer(decode_error='ignore') -# X = tfidf.fit_transform(df['data']) +tfidf = TfidfVectorizer(decode_error='ignore') +Xtrain = tfidf.fit_transform(df_train) +Xtest = tfidf.transform(df_test) + +# count_vectorizer = CountVectorizer(decode_error='ignore') +# Xtrain = count_vectorizer.fit_transform(df_train) +# Xtest = count_vectorizer.transform(df_test) -count_vectorizer = CountVectorizer(decode_error='ignore') -X = count_vectorizer.fit_transform(df['data']) -# split up the data -Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) # create the model, train it, print scores model = MultinomialNB() model.fit(Xtrain, Ytrain) print("train score:", model.score(Xtrain, Ytrain)) print("test score:", model.score(Xtest, Ytest)) -exit() +# exit() # visualize the data @@ -69,6 +73,7 @@ def visualize(label): # see what we're getting wrong +X = tfidf.transform(df['data']) df['predictions'] = model.predict(X) # things that should be spam diff --git a/nlp_class2/bow_classifier.py b/nlp_class2/bow_classifier.py index 60c1a92d..25588e3b 100644 --- a/nlp_class2/bow_classifier.py +++ b/nlp_class2/bow_classifier.py @@ -17,6 +17,7 @@ # data from https://www.cs.umb.edu/~smimarog/textmining/datasets/ +# alternate source: https://lazyprogrammer.me/course_files/deepnlp_classification_data.zip train = pd.read_csv('../large_files/r8-train-all-terms.txt', header=None, sep='\t') test = pd.read_csv('../large_files/r8-test-all-terms.txt', header=None, sep='\t') train.columns = ['label', 'content'] diff --git a/nlp_class2/glove_tf.py b/nlp_class2/glove_tf.py index aa8371ad..9db18bb4 100644 --- a/nlp_class2/glove_tf.py +++ b/nlp_class2/glove_tf.py @@ -22,6 +22,9 @@ from rnn_class.util import get_wikipedia_data from rnn_class.brown import get_sentences_with_word2idx_limit_vocab, 
get_sentences_with_word2idx +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + class Glove: @@ -119,22 +122,22 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, tfb = tf.Variable(b.reshape(V, 1).astype(np.float32)) tfU = tf.Variable(U.astype(np.float32)) tfc = tf.Variable(c.reshape(1, V).astype(np.float32)) - tfLogX = tf.placeholder(tf.float32, shape=(V, V)) - tffX = tf.placeholder(tf.float32, shape=(V, V)) + tfLogX = tf.compat.v1.placeholder(tf.float32, shape=(V, V)) + tffX = tf.compat.v1.placeholder(tf.float32, shape=(V, V)) - delta = tf.matmul(tfW, tf.transpose(tfU)) + tfb + tfc + mu - tfLogX - cost = tf.reduce_sum(tffX * delta * delta) + delta = tf.matmul(tfW, tf.transpose(a=tfU)) + tfb + tfc + mu - tfLogX + cost = tf.reduce_sum(input_tensor=tffX * delta * delta) regularized_cost = cost for param in (tfW, tfU): - regularized_cost += reg*tf.reduce_sum(param * param) + regularized_cost += reg*tf.reduce_sum(input_tensor=param * param) - train_op = tf.train.MomentumOptimizer( + train_op = tf.compat.v1.train.MomentumOptimizer( learning_rate, momentum=0.9 ).minimize(regularized_cost) # train_op = tf.train.AdamOptimizer(1e-3).minimize(regularized_cost) - init = tf.global_variables_initializer() - session = tf.InteractiveSession() + init = tf.compat.v1.global_variables_initializer() + session = tf.compat.v1.InteractiveSession() session.run(init) costs = [] diff --git a/nlp_class2/neural_network2.py b/nlp_class2/neural_network2.py index 573dd9e9..159dc571 100644 --- a/nlp_class2/neural_network2.py +++ b/nlp_class2/neural_network2.py @@ -96,18 +96,23 @@ def softmax(a): # # original: W1 = W1 - lr * inputs.T.dot(dhidden) # VxN NxD --> VxD # fastest way + W1_copy = W1.copy() np.subtract.at(W1, inputs, lr * dhidden) - # test this - # i = 0 - # for w in inputs: # don't include end token - # W1[w] = W1[w] - lr * dhidden[i] - # i += 1 - # vs this + # W1_test = W1_copy.copy() # oh_inputs = np.zeros((n - 1, V)) # oh_inputs[np.arange(n - 1), sentence[:n-1]] = 1 - # W1 = W1 - lr * oh_inputs.T.dot(dhidden) + # W1_test = W1_test - lr * oh_inputs.T.dot(dhidden) + # assert(np.allclose(W1_test, W1)) + + # vs this + # W1_test = W1_copy.copy() + # i = 0 + # for w in inputs: # don't include end token + # W1_test[w] = W1_test[w] - lr * dhidden[i] + # i += 1 + # assert(np.allclose(W1_test, W1)) # keep track of the bigram loss # only do it for the first epoch to avoid redundancy diff --git a/nlp_class2/pos_ner_keras.py b/nlp_class2/pos_ner_keras.py index a150fc29..7a1335e1 100644 --- a/nlp_class2/pos_ner_keras.py +++ b/nlp_class2/pos_ner_keras.py @@ -217,8 +217,8 @@ def get_data_ner(split_sequences=False): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/nlp_class2/recursive_tensorflow.py b/nlp_class2/recursive_tensorflow.py index 4c43e3df..02f02360 100644 --- a/nlp_class2/recursive_tensorflow.py +++ b/nlp_class2/recursive_tensorflow.py @@ -17,6 +17,9 @@ from datetime import datetime from util import init_weight, get_ptb_data, display_tree +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + def get_labels(tree): # must be returned in the same order as tree logits are returned @@ -73,22 +76,22 @@ def fit(self, trees, lr=1e-1, mu=0.9, reg=0.1, epochs=5): cost = self.get_cost(logits, labels, reg) costs.append(cost) - prediction = 
tf.argmax(logits, 1) + prediction = tf.argmax(input=logits, axis=1) predictions.append(prediction) - train_op = tf.train.MomentumOptimizer(lr, mu).minimize(cost) + train_op = tf.compat.v1.train.MomentumOptimizer(lr, mu).minimize(cost) train_ops.append(train_op) # save for later so we don't have to recompile self.predictions = predictions self.all_labels = all_labels - self.saver = tf.train.Saver() + self.saver = tf.compat.v1.train.Saver() - init = tf.initialize_all_variables() + init = tf.compat.v1.initialize_all_variables() actual_costs = [] per_epoch_costs = [] correct_rates = [] - with tf.Session() as session: + with tf.compat.v1.Session() as session: session.run(init) for i in range(epochs): @@ -136,7 +139,7 @@ def fit(self, trees, lr=1e-1, mu=0.9, reg=0.1, epochs=5): def get_cost(self, logits, labels, reg): cost = tf.reduce_mean( - tf.nn.sparse_softmax_cross_entropy_with_logits( + input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=labels ) @@ -150,7 +153,7 @@ def get_cost(self, logits, labels, reg): def get_output_recursive(self, tree, list_of_logits, is_root=True): if tree.word is not None: # this is a leaf node - x = tf.nn.embedding_lookup(self.We, [tree.word]) + x = tf.nn.embedding_lookup(params=self.We, ids=[tree.word]) else: # this node has children x1 = self.get_output_recursive(tree.left, list_of_logits, is_root=False) @@ -197,12 +200,12 @@ def score(self, trees): labels = get_labels(t) all_labels.append(labels) - prediction = tf.argmax(logits, 1) + prediction = tf.argmax(input=logits, axis=1) predictions.append(prediction) n_correct = 0 n_total = 0 - with tf.Session() as session: + with tf.compat.v1.Session() as session: self.saver.restore(session, "recursive.ckpt") for prediction, y in zip(predictions, all_labels): p = session.run(prediction) diff --git a/nlp_class2/rntn_tensorflow_rnn.py b/nlp_class2/rntn_tensorflow_rnn.py index 29caba32..816ff4a2 100644 --- a/nlp_class2/rntn_tensorflow_rnn.py +++ b/nlp_class2/rntn_tensorflow_rnn.py @@ -17,6 +17,9 @@ from datetime import datetime from sklearn.metrics import f1_score +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + class RecursiveNN: @@ -54,10 +57,10 @@ def fit(self, trees, test_trees, reg=1e-3, epochs=8, train_inner_nodes=False): self.weights = [self.We, self.W11, self.W22, self.W12, self.W1, self.W2, self.Wo] - words = tf.placeholder(tf.int32, shape=(None,), name='words') - left_children = tf.placeholder(tf.int32, shape=(None,), name='left_children') - right_children = tf.placeholder(tf.int32, shape=(None,), name='right_children') - labels = tf.placeholder(tf.int32, shape=(None,), name='labels') + words = tf.compat.v1.placeholder(tf.int32, shape=(None,), name='words') + left_children = tf.compat.v1.placeholder(tf.int32, shape=(None,), name='left_children') + right_children = tf.compat.v1.placeholder(tf.int32, shape=(None,), name='right_children') + labels = tf.compat.v1.placeholder(tf.int32, shape=(None,), name='labels') # save for later self.words = words @@ -89,9 +92,9 @@ def recurrence(hiddens, n): # any non-word will have index -1 h_n = tf.cond( - w >= 0, - lambda: tf.nn.embedding_lookup(self.We, w), - lambda: recursive_net_transform(hiddens, n) + pred=w >= 0, + true_fn=lambda: tf.nn.embedding_lookup(params=self.We, ids=w), + false_fn=lambda: recursive_net_transform(hiddens, n) ) hiddens = hiddens.write(n, h_n) n = tf.add(n, 1) @@ -100,7 +103,7 @@ def recurrence(hiddens, n): def condition(hiddens, n): # loop should continue while n < len(words) - return 
tf.less(n, tf.shape(words)[0]) + return tf.less(n, tf.shape(input=words)[0]) hiddens = tf.TensorArray( @@ -112,44 +115,44 @@ def condition(hiddens, n): ) hiddens, _ = tf.while_loop( - condition, - recurrence, - [hiddens, tf.constant(0)], + cond=condition, + body=recurrence, + loop_vars=[hiddens, tf.constant(0)], parallel_iterations=1 ) h = hiddens.stack() logits = tf.matmul(h, self.Wo) + self.bo - prediction_op = tf.argmax(logits, axis=1) + prediction_op = tf.argmax(input=logits, axis=1) self.prediction_op = prediction_op rcost = reg*sum(tf.nn.l2_loss(p) for p in self.weights) if train_inner_nodes: # filter out -1s - labeled_indices = tf.where(labels >= 0) + labeled_indices = tf.compat.v1.where(labels >= 0) cost_op = tf.reduce_mean( - tf.nn.sparse_softmax_cross_entropy_with_logits( + input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits( logits=tf.gather(logits, labeled_indices), labels=tf.gather(labels, labeled_indices), ) ) + rcost else: cost_op = tf.reduce_mean( - tf.nn.sparse_softmax_cross_entropy_with_logits( + input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits[-1], labels=labels[-1], ) ) + rcost - train_op = tf.train.AdagradOptimizer(learning_rate=8e-3).minimize(cost_op) + train_op = tf.compat.v1.train.AdagradOptimizer(learning_rate=8e-3).minimize(cost_op) # train_op = tf.train.MomentumOptimizer(learning_rate=8e-3, momentum=0.9).minimize(cost_op) # NOTE: If you're using GPU, InteractiveSession breaks # AdagradOptimizer and some other optimizers # change to tf.Session() if so. - self.session = tf.Session() - init_op = tf.global_variables_initializer() + self.session = tf.compat.v1.Session() + init_op = tf.compat.v1.global_variables_initializer() self.session.run(init_op) diff --git a/nlp_class2/word2vec_tf.py b/nlp_class2/word2vec_tf.py index fee4ad99..d272b003 100644 --- a/nlp_class2/word2vec_tf.py +++ b/nlp_class2/word2vec_tf.py @@ -25,6 +25,9 @@ import sys import string +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + # unfortunately these work different ways @@ -131,36 +134,36 @@ def train_model(savedir): # create the model - tf_input = tf.placeholder(tf.int32, shape=(None,)) - tf_negword = tf.placeholder(tf.int32, shape=(None,)) - tf_context = tf.placeholder(tf.int32, shape=(None,)) # targets (context) + tf_input = tf.compat.v1.placeholder(tf.int32, shape=(None,)) + tf_negword = tf.compat.v1.placeholder(tf.int32, shape=(None,)) + tf_context = tf.compat.v1.placeholder(tf.int32, shape=(None,)) # targets (context) tfW = tf.Variable(W) tfV = tf.Variable(V.T) # biases = tf.Variable(np.zeros(vocab_size, dtype=np.float32)) def dot(A, B): C = A * B - return tf.reduce_sum(C, axis=1) + return tf.reduce_sum(input_tensor=C, axis=1) # correct middle word output - emb_input = tf.nn.embedding_lookup(tfW, tf_input) # 1 x D - emb_output = tf.nn.embedding_lookup(tfV, tf_context) # N x D + emb_input = tf.nn.embedding_lookup(params=tfW, ids=tf_input) # 1 x D + emb_output = tf.nn.embedding_lookup(params=tfV, ids=tf_context) # N x D correct_output = dot(emb_input, emb_output) # N # emb_input = tf.transpose(emb_input, (1, 0)) # correct_output = tf.matmul(emb_output, emb_input) pos_loss = tf.nn.sigmoid_cross_entropy_with_logits( - labels=tf.ones(tf.shape(correct_output)), logits=correct_output) + labels=tf.ones(tf.shape(input=correct_output)), logits=correct_output) # incorrect middle word output - emb_input = tf.nn.embedding_lookup(tfW, tf_negword) + emb_input = tf.nn.embedding_lookup(params=tfW, ids=tf_negword) incorrect_output = 
dot(emb_input, emb_output) # emb_input = tf.transpose(emb_input, (1, 0)) # incorrect_output = tf.matmul(emb_output, emb_input) neg_loss = tf.nn.sigmoid_cross_entropy_with_logits( - labels=tf.zeros(tf.shape(incorrect_output)), logits=incorrect_output) + labels=tf.zeros(tf.shape(input=incorrect_output)), logits=incorrect_output) # total loss - loss = tf.reduce_mean(pos_loss) + tf.reduce_mean(neg_loss) + loss = tf.reduce_mean(input_tensor=pos_loss) + tf.reduce_mean(input_tensor=neg_loss) # output = hidden.dot(tfV) @@ -179,12 +182,12 @@ def dot(A, B): # optimizer # train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss) - train_op = tf.train.MomentumOptimizer(0.1, momentum=0.9).minimize(loss) + train_op = tf.compat.v1.train.MomentumOptimizer(0.1, momentum=0.9).minimize(loss) # train_op = tf.train.AdamOptimizer(1e-2).minimize(loss) # make session - session = tf.Session() - init_op = tf.global_variables_initializer() + session = tf.compat.v1.Session() + init_op = tf.compat.v1.global_variables_initializer() session.run(init_op) diff --git a/nlp_class3/attention.py b/nlp_class3/attention.py index d7e9c809..19ba3066 100644 --- a/nlp_class3/attention.py +++ b/nlp_class3/attention.py @@ -16,9 +16,13 @@ import numpy as np import matplotlib.pyplot as plt -if len(K.tensorflow_backend._get_available_gpus()) > 0: - from keras.layers import CuDNNLSTM as LSTM - from keras.layers import CuDNNGRU as GRU +try: + import keras.backend as K + if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU +except: + pass # make sure we do softmax over the time axis @@ -34,10 +38,10 @@ def softmax_over_time(x): # config BATCH_SIZE = 64 -EPOCHS = 100 -LATENT_DIM = 256 -LATENT_DIM_DECODER = 256 # idea: make it different to ensure things all fit together properly! -NUM_SAMPLES = 10000 +EPOCHS = 30 +LATENT_DIM = 400 +LATENT_DIM_DECODER = 400 # idea: make it different to ensure things all fit together properly! 
+NUM_SAMPLES = 20000 MAX_SEQUENCE_LENGTH = 100 MAX_NUM_WORDS = 20000 EMBEDDING_DIM = 100 @@ -65,7 +69,7 @@ def softmax_over_time(x): continue # split up the input and translation - input_text, translation = line.rstrip().split('\t') + input_text, translation, *rest = line.rstrip().split('\t') # make the target input and output # recall we'll be using teacher forcing @@ -190,7 +194,8 @@ def softmax_over_time(x): # assign the values for i, d in enumerate(decoder_targets): for t, word in enumerate(d): - decoder_targets_one_hot[i, t, word] = 1 + if word > 0: + decoder_targets_one_hot[i, t, word] = 1 @@ -324,11 +329,33 @@ def stack_and_transpose(x): outputs=outputs ) + +def custom_loss(y_true, y_pred): + # both are of shape N x T x K + mask = K.cast(y_true > 0, dtype='float32') + out = mask * y_true * K.log(y_pred) + return -K.sum(out) / K.sum(mask) + + +def acc(y_true, y_pred): + # both are of shape N x T x K + targ = K.argmax(y_true, axis=-1) + pred = K.argmax(y_pred, axis=-1) + correct = K.cast(K.equal(targ, pred), dtype='float32') + + # 0 is padding, don't include those + mask = K.cast(K.greater(targ, 0), dtype='float32') + n_correct = K.sum(mask * correct) + n_total = K.sum(mask) + return n_correct / n_total + + # compile the model -model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) +model.compile(optimizer='adam', loss=custom_loss, metrics=[acc]) +# model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc']) # train the model -z = np.zeros((NUM_SAMPLES, LATENT_DIM_DECODER)) # initial [s, c] +z = np.zeros((len(encoder_inputs), LATENT_DIM_DECODER)) # initial [s, c] r = model.fit( [encoder_inputs, decoder_inputs, z, z], decoder_targets_one_hot, batch_size=BATCH_SIZE, @@ -336,6 +363,8 @@ def stack_and_transpose(x): validation_split=0.2 ) + + # plot some data plt.plot(r.history['loss'], label='loss') plt.plot(r.history['val_loss'], label='val_loss') @@ -343,8 +372,8 @@ def stack_and_transpose(x): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() @@ -371,6 +400,9 @@ def stack_and_transpose(x): # combine context with last word decoder_lstm_input = context_last_word_concat_layer([context, decoder_inputs_single_x]) + + + # lstm and final dense o, s, c = decoder_lstm(decoder_lstm_input, initial_state=[initial_s, initial_c]) decoder_outputs = decoder_dense(o) diff --git a/nlp_class3/bilstm_mnist.py b/nlp_class3/bilstm_mnist.py index 4002b2ae..3da99920 100644 --- a/nlp_class3/bilstm_mnist.py +++ b/nlp_class3/bilstm_mnist.py @@ -13,9 +13,13 @@ import pandas as pd import matplotlib.pyplot as plt -if len(K.tensorflow_backend._get_available_gpus()) > 0: - from keras.layers import CuDNNLSTM as LSTM - from keras.layers import CuDNNGRU as GRU +try: + import keras.backend as K + if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU +except: + pass def get_mnist(limit=None): @@ -97,8 +101,8 @@ def get_mnist(limit=None): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/nlp_class3/bilstm_test.py b/nlp_class3/bilstm_test.py index 800902f9..2f31b489 100644 --- 
a/nlp_class3/bilstm_test.py +++ b/nlp_class3/bilstm_test.py @@ -9,10 +9,13 @@ import numpy as np import matplotlib.pyplot as plt -import keras.backend as K -if len(K.tensorflow_backend._get_available_gpus()) > 0: - from keras.layers import CuDNNLSTM as LSTM - from keras.layers import CuDNNGRU as GRU +try: + import keras.backend as K + if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU +except: + pass T = 8 diff --git a/nlp_class3/cnn_toxic.py b/nlp_class3/cnn_toxic.py index 5ecf0443..f0c55604 100644 --- a/nlp_class3/cnn_toxic.py +++ b/nlp_class3/cnn_toxic.py @@ -19,6 +19,7 @@ # Download the data: # https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge +# https://lazyprogrammer.me/course_files/toxic_comment_train.csv # Download the word vectors: # http://nlp.stanford.edu/data/glove.6B.zip @@ -70,11 +71,15 @@ s = sorted(len(s) for s in sequences) print("median sequence length:", s[len(s) // 2]) +print("max word index:", max(max(seq) for seq in sequences if len(seq) > 0)) + # get word -> integer mapping word2idx = tokenizer.word_index print('Found %s unique tokens.' % len(word2idx)) +# exit() + # pad sequences so that we get a N x T matrix data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH) @@ -144,8 +149,8 @@ plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/nlp_class3/lstm_toxic.py b/nlp_class3/lstm_toxic.py index 8dc41f97..113c3c5d 100644 --- a/nlp_class3/lstm_toxic.py +++ b/nlp_class3/lstm_toxic.py @@ -20,13 +20,14 @@ from sklearn.metrics import roc_auc_score import keras.backend as K -if len(K.tensorflow_backend._get_available_gpus()) > 0: - from keras.layers import CuDNNLSTM as LSTM - from keras.layers import CuDNNGRU as GRU +# if len(K.tensorflow_backend._get_available_gpus()) > 0: +# from keras.layers import CuDNNLSTM as LSTM +# from keras.layers import CuDNNGRU as GRU # Download the data: # https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge +# https://lazyprogrammer.me/course_files/toxic_comment_train.csv # Download the word vectors: # http://nlp.stanford.edu/data/glove.6B.zip @@ -115,8 +116,8 @@ # create an LSTM network with a single LSTM input_ = Input(shape=(MAX_SEQUENCE_LENGTH,)) x = embedding_layer(input_) -# x = LSTM(15, return_sequences=True)(x) -x = Bidirectional(LSTM(15, return_sequences=True))(x) +x = LSTM(15, return_sequences=True)(x) +# x = Bidirectional(LSTM(15, return_sequences=True))(x) x = GlobalMaxPool1D()(x) output = Dense(len(possible_labels), activation="sigmoid")(x) @@ -124,7 +125,7 @@ model.compile( loss='binary_crossentropy', optimizer=Adam(lr=0.01), - metrics=['accuracy'] + metrics=['accuracy'], ) @@ -144,8 +145,8 @@ plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/nlp_class3/memory_network.py b/nlp_class3/memory_network.py index a73a3eed..52f4c291 100644 --- a/nlp_class3/memory_network.py +++ b/nlp_class3/memory_network.py @@ -43,7 +43,7 @@ def tokenize(sent): >>> tokenize('Bob dropped the apple. 
Where is the apple?') ['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?'] ''' - return [x.strip() for x in re.split('(\W+)?', sent) if x.strip()] + return [x.strip() for x in re.split('(\W+?)', sent) if x.strip()] @@ -425,7 +425,7 @@ def hop(query, story): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/nlp_class3/poetry.py b/nlp_class3/poetry.py index 5b3cfac7..74fac502 100644 --- a/nlp_class3/poetry.py +++ b/nlp_class3/poetry.py @@ -17,10 +17,13 @@ from keras.preprocessing.sequence import pad_sequences from keras.optimizers import Adam, SGD -import keras.backend as K -if len(K.tensorflow_backend._get_available_gpus()) > 0: - from keras.layers import CuDNNLSTM as LSTM - from keras.layers import CuDNNGRU as GRU +try: + import keras.backend as K + if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU +except: + pass # some configuration @@ -160,8 +163,8 @@ plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/nlp_class3/simple_rnn_test.py b/nlp_class3/simple_rnn_test.py index b67a4b29..e6f84430 100644 --- a/nlp_class3/simple_rnn_test.py +++ b/nlp_class3/simple_rnn_test.py @@ -9,10 +9,13 @@ import numpy as np import matplotlib.pyplot as plt -import keras.backend as K -if len(K.tensorflow_backend._get_available_gpus()) > 0: - from keras.layers import CuDNNLSTM as LSTM - from keras.layers import CuDNNGRU as GRU +try: + import keras.backend as K + if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU +except: + pass T = 8 diff --git a/nlp_class3/wseq2seq.py b/nlp_class3/wseq2seq.py index 384dbf46..0f2e1c70 100644 --- a/nlp_class3/wseq2seq.py +++ b/nlp_class3/wseq2seq.py @@ -15,18 +15,20 @@ import numpy as np import matplotlib.pyplot as plt -import keras.backend as K -if len(K.tensorflow_backend._get_available_gpus()) > 0: - from keras.layers import CuDNNLSTM as LSTM - from keras.layers import CuDNNGRU as GRU +try: + import keras.backend as K + if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU +except: + pass # some config BATCH_SIZE = 64 # Batch size for training. -EPOCHS = 100 # Number of epochs to train for. +EPOCHS = 40 # Number of epochs to train for. LATENT_DIM = 256 # Latent dimensionality of the encoding space. NUM_SAMPLES = 10000 # Number of samples to train on. 
-MAX_SEQUENCE_LENGTH = 100 MAX_NUM_WORDS = 20000 EMBEDDING_DIM = 100 @@ -50,7 +52,7 @@ continue # split up the input and translation - input_text, translation = line.rstrip().split('\t') + input_text, translation, *rest = line.rstrip().split('\t') # make the target input and output # recall we'll be using teacher forcing @@ -166,7 +168,8 @@ # assign the values for i, d in enumerate(decoder_targets): for t, word in enumerate(d): - decoder_targets_one_hot[i, t, word] = 1 + if word != 0: + decoder_targets_one_hot[i, t, word] = 1 @@ -191,7 +194,7 @@ # this word embedding will not use pre-trained vectors # although you could -decoder_embedding = Embedding(num_words_output, LATENT_DIM) +decoder_embedding = Embedding(num_words_output, EMBEDDING_DIM) decoder_inputs_x = decoder_embedding(decoder_inputs_placeholder) # since the decoder is a "to-many" model we want to have @@ -219,12 +222,37 @@ # Create the model object model = Model([encoder_inputs_placeholder, decoder_inputs_placeholder], decoder_outputs) + +def custom_loss(y_true, y_pred): + # both are of shape N x T x K + mask = K.cast(y_true > 0, dtype='float32') + out = mask * y_true * K.log(y_pred) + return -K.sum(out) / K.sum(mask) + + +def acc(y_true, y_pred): + # both are of shape N x T x K + targ = K.argmax(y_true, axis=-1) + pred = K.argmax(y_pred, axis=-1) + correct = K.cast(K.equal(targ, pred), dtype='float32') + + # 0 is padding, don't include those + mask = K.cast(K.greater(targ, 0), dtype='float32') + n_correct = K.sum(mask * correct) + n_total = K.sum(mask) + return n_correct / n_total + +model.compile(optimizer='adam', loss=custom_loss, metrics=[acc]) + # Compile the model and train it -model.compile( - optimizer='rmsprop', - loss='categorical_crossentropy', - metrics=['accuracy'] -) +# model.compile( +# optimizer='rmsprop', +# loss='categorical_crossentropy', +# metrics=['accuracy'] +# ) + + + r = model.fit( [encoder_inputs, decoder_inputs], decoder_targets_one_hot, batch_size=BATCH_SIZE, @@ -239,8 +267,8 @@ plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/nlp_v2/WHERE ARE THE NOTEBOOKS.txt b/nlp_v2/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..5446ce25 --- /dev/null +++ b/nlp_v2/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,5 @@ +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. + +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. 
\ No newline at end of file diff --git a/nlp_v2/extra_reading.txt b/nlp_v2/extra_reading.txt new file mode 100644 index 00000000..d7d382f9 --- /dev/null +++ b/nlp_v2/extra_reading.txt @@ -0,0 +1,41 @@ +An information-theoretic perspective of tf–idf measures +https://www.sciencedirect.com/science/article/abs/pii/S0306457302000213 + +A Mathematical Theory of Communication by Claude Shannon +https://people.math.harvard.edu/~ctm/home/text/others/shannon/entropy/entropy.pdf + +TextRank: Bringing Order into Texts +https://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf + +Variations of the Similarity Function of TextRank for Automated Summarization +https://arxiv.org/abs/1602.03606 + +Generic Text Summarization Using Relevance Measure and Latent Semantic Analysis +https://www.cs.bham.ac.uk/~pxt/IDA/text_summary.pdf + +Using Latent Semantic Analysis in Text Summarization and Summary Evaluation +http://textmining.zcu.cz/publications/isim.pdf + +Spam Filtering with Naive Bayes – Which Naive Bayes? +http://www2.aueb.gr/users/ion/docs/ceas2006_paper.pdf + +Sentiment analysis using multinomial logistic regression +https://ieeexplore.ieee.org/document/8226700 + +Latent Dirichlet Allocation +https://www.jmlr.org/papers/volume3/blei03a/blei03a.pdf + +List of Hugging Face Pipelines for NLP +https://lazyprogrammer.me/list-of-hugging-face-pipelines-for-nlp/ + +Indexing by Latent Semantic Analysis (Latent Semantic Indexing) +http://lsa.colorado.edu/papers/JASIS.lsi.90.pdf + +Efficient Estimation of Word Representations in Vector Space (word2vec) +https://arxiv.org/abs/1301.3781 + +GloVe: Global Vectors for Word Representation (GloVe) +https://nlp.stanford.edu/pubs/glove.pdf + +Deep Learning with Tensorflow, a bit more in-depth +https://deeplearningcourses.com/c/deep-learning-tensorflow-2 \ No newline at end of file diff --git a/openai/extra_reading.txt b/openai/extra_reading.txt new file mode 100644 index 00000000..776b62c7 --- /dev/null +++ b/openai/extra_reading.txt @@ -0,0 +1,21 @@ +How to Set Environment Variables Permanently in Windows, Linux, and Mac +https://lazyprogrammer.me/how-to-set-environment-variables-permanently-in-windows-linux-and-mac/ + +How to make your completions outputs consistent with the new seed parameter +https://cookbook.openai.com/examples/reproducible_outputs_with_the_seed_parameter + +What is Temperature in NLP / LLMs? +https://medium.com/@lazyprogrammerofficial/what-is-temperature-in-nlp-llms-aa2a7212e687 + +Large Language Models are Zero-Shot Reasoners (CoT) +https://arxiv.org/abs/2205.11916 + +Chain-of-Thought Prompting Elicits Reasoning in Large Language Models +https://arxiv.org/abs/2201.11903 + +A much better example of "ELI5" +https://www.reddit.com/r/ChatGPT/comments/1c5s51g/my_mother_and_i_had_difficulty_understanding_my + +What is RAG? 
— Retrieval-Augmented Generation Explained +https://medium.com/@lazyprogrammerofficial/what-is-rag-retrieval-augmented-generation-explained-148c8bb9c00f +https://lazyprogrammer.me/what-is-rag-retrieval-augmented-generation-explained/ \ No newline at end of file diff --git a/openai/fight.mp4 b/openai/fight.mp4 new file mode 100644 index 00000000..24d83be4 Binary files /dev/null and b/openai/fight.mp4 differ diff --git a/openai/finance.png b/openai/finance.png new file mode 100644 index 00000000..b274c4b9 Binary files /dev/null and b/openai/finance.png differ diff --git a/openai/handwriting.jpg b/openai/handwriting.jpg new file mode 100644 index 00000000..2e92adf4 Binary files /dev/null and b/openai/handwriting.jpg differ diff --git a/openai/physics_problem.jpeg b/openai/physics_problem.jpeg new file mode 100644 index 00000000..3f6d3200 Binary files /dev/null and b/openai/physics_problem.jpeg differ diff --git a/openai/replies.json b/openai/replies.json new file mode 100644 index 00000000..27d6761c --- /dev/null +++ b/openai/replies.json @@ -0,0 +1,206 @@ +[ + { + "review": "(1) His answers are sometimes antagonistic but the guy wants us to think by ourselves. I feel he was guided by questions from students with very little background on the subject. (2) Links are not updated. I understand not updated them on the videos, but on the git repository and the scripts, it should have them updated. (3) Explanations are great, with a few inconsistencies when compared to Gemini.google.com understanding. (4) The course content in general is great.", + "response": "(1) I think all students should respect other students taking this course. All students deserve to have their questions answered, and no student can expect to have this course personalized to their own background.\n\n(2) This is incorrect, and you were already instructed on the Q&A to ensure you were looking at the correct repository with the most up-to-date files.\n\n(3) LLMs are known for hallucinating, and their output cannot be trusted, especially if you don't know what you're doing. Instead, you should be using the Q&A to rectify these issues, which is why it's the #1 rule in 'how to succeed in this course'." + }, + { + "review": "You should have explained in the introduction video that, you have not yet figured out Stock forecasting, so explaining your (thoughts or beliefs or work) in this course. But marketing in great way , nothing in content other than playing with data.", + "response": "Try paying attention and understanding the course. If you still believe there's some magic algorithm to perfectly predict stock prices and I \"just haven't figured it out yet\", you clearly weren't listening, know absolutely nothing about finance, and hence, spreading misinformation to readers." + }, + { + "review": "I'm really disappointed. The last update of your codes was 9 years ago. Nothing is running. I tried file after file - nothing works. I don't think you could even continue to sell these courses. Unfortunately, I can no longer cancel the current course, but I will cancel the next course. Your courses do not meet basic standards. Too bad.", + "response": "> The last update of your codes was 9 years ago.\n\nNOTE: Our friend here just doesn't know how to use Git properly. My ML Github repo was CREATED 9 years ago (long before I even started making courses)." + }, + { + "review": "nao é claro ainda como fazer o donlow de githum", + "response": "Thanks for the feedback! 
Please re-watch the lecture \"Where to get the code / notebooks\" carefully and follow the instructions. It clearly states 5 times (yes, that many times) that the notebooks are not on Github, but rather, are accessed via the code link." + }, + { + "review": "It is a good course about RL, as all his courses, but if you are here for the Trading agent, don't buy it... very basic code and does not really work.", + "response": "Thanks for your feedback! However, it seems you are basing your rating around your own misconceptions and naivete surrounding trading and finance, rather than the quality of the course itself." + }, + { + "review": "Need to explain more about the topic. CNN is more theoretical in the course than programming.", + "response": "Incorrect. There are equal parts theory and programming. Every \"theoretical\" concept is implemented in code. And obviously, you must understand \"what\" you are coding before you code it. Please pay attention to improve your understanding, thanks!" + }, + { + "review": "The content is ok but the links between videos and sections aren't always obvious. I like the fact that it goes in dept on my subjects but the quality of the audio isn't always good enough. I would still recommend to someone that really want to have a better understanding of AI/ML or specifically logistic regression but expect some extra reading if you really want to understand all the concepts well.", + "response": "You should probably revisit the \"introduction and outline\" of the course if you've forgotten the structure of the course. There's only \"extra reading\" if you do not sufficiently meet the prerequisites." + }, + { + "review": "I expected some coding and practice but most of the course till now is just theory", + "response": "That's incorrect, and this is even visible from simply looking at the lecture titles. There are equal parts theory and code. First we discuss \"what\" we will code, then we code it. Obviously, you can't write code without knowing what you're coding first... Please pay attention to improve your understanding, thanks!" + }, + { + "review": "It is all over the place ... not very structured and i have IT and python background , still finding difficult to follow ... i wonder how the Deep learning course will be", + "response": "Unclear why you're having trouble following the structure of such a simple short course. There are only 4 sections: Numpy, Matplotlib, Pandas, and Scipy. Please pay attention to improve your understanding, thanks!" + }, + { + "review": "Content is good so far, but lecturer seems hung up on the behaviors of the participants which is not helping the instruction time.", + "response": "It's important to remember that the course isn't customized for you individually. Therefore, it should be expected that common problems that afflict a non-trivial number of students will be addressed." + }, + { + "review": "Explanation is not clear as we are beginners. May be improve better to understand clearly.", + "response": "Thanks for the feedback! Please ensure you meet the prerequisites as listed twice in the course description and several more times in lectures such as \"how to succeed in this course\"." + }, + { + "review": "Much was great, some frequently explained by referring to other courses as well as skipping some code blocks or instrumental variables, which were readily explained in chat gpt by asking if to add copious explanatory comments. 
Most, but certainly not all AI ML course instructors are more concerned with taking the time to explain finer details of the code. Early parts on ANN and CNN were excellent in presentation although this was simpler material. In the course was more presenting and explaining than teaching how to code the models, both necessary individually and together sufficient for a solid learning experience. Perhaps sacrifice some optional topics for more time indepth to the course essentials - quantity versus quality.", + "response": "Unclear whether you wrote this comment for the wrong course, as there are no sections about ANNs or CNNs in this course... Furthermore, this course is not about in-depth theory - please check the course description and prerequisites, thanks!" + }, + { + "review": "Some more details about math equations could be added", + "response": "Thanks for the feedback! Please ensure you meet the prerequisites as listed twice in the course description and several more times in lectures such as \"how to succeed in this course\". Furthermore, you may want to read the course description that explains that this course is about Tensorflow 2, not the math behind deep learning. I already have courses on that, so check those out instead." + }, + { + "review": "Teaches only Syntax and not any ML or theory behind how any of the Neural Network architectures work.", + "response": "Incorrect. This course gives an overview of theory, and furthermore, I already have 15+ in-depth DL courses that go into the math behind DL. Luckily, this is all in the course description, which I'm sure you've diligently read. ;)" + }, + { + "review": "It was a good match for my current abilities. I could not access the live python notebooks or links used. I would have appreciated solutions to the end of section exercises. Overall though the instructor is very knowledgeable and the course is free so I can't complain.", + "response": "The key is to follow the instructions. All notebooks and exercise solutions are provided, if you can just follow the instructions on how to get them (clicking links). It really is very easy, it just requires paying attention. :)" + }, + { + "review": "The Neural Network part is not very clear", + "response": "This course doesn't talk about neural networks..." + }, + { + "review": "need more examples", + "response": "The course is full of examples, please use the Q&A if you have difficulties understanding them" + }, + { + "review": "Could use a bit more time spent on explaining certain concepts, like Box-Cox, giving more intuition and explanation of why that is useful", + "response": "Please use the Q&A to inquire about your misunderstandings, as stated in \"how to succeed in this course\"." + }, + { + "review": "Interesting course with lots of examples and lectures. Although some parts of the course become repetitive. There are lectures where he explains the code step by step and then goes on to repeat the same thing in the \"... in Python\" lectures. It would have been nice if he had proposed other exercises with a different dataset than the lectures, even though he does not provide the solution. 
It is fine to say that we should try to write the code ourselves first and then check the solution, but when this is reduced to copy and paste from the previous lecture it seems ridiculous to me.", + "response": "This is a practical course, meaning that you are shown the code and can do with it what you wish (there's no way to \"exercise\" writing library code without first being shown the syntax, which doesn't make any sense). Additionally, repetition is a research-backed learning technique: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8442015/ Furthermore, please remember that the course is not customized to you. Therefore, if you personally choose to avoid repetition, you are free to do so, nobody is stopping you." + }, + { + "review": "I enjoyed the author explanation on the differences between statistics regression and ML regression content. But I think the lecture on the differences was pedantic and unnecessary even. The content list and summary to this lecture is where the distinction could be made. Instead, it was spent ranting within the lecture, how does that provide any value and meaningful experience? Worry not about the review, if the content is good, the review would come.", + "response": "> But I think the lecture on the differences was pedantic and unnecessary even.\n\nPlease remember, the course is not customized for you personally, it's for all students and therefore addresses common student mistakes, irrespective of your personal needs. Obviously, I have to answer ALL student questions, whether or not you personally find the question/answer helpful." + }, + { + "review": "The instructor seems inadequately prepared for each segment of the course. There is a noticeable absence of demonstrations that are essential for a comprehensive understanding of the material. Furthermore, there is a disproportionate emphasis on paid websites over free alternatives, raising concerns about potential conflicts of interest due to affiliate marketing associations with these paid platforms.", + "response": "Everything is demonstrated in Colab notebooks. I challenge you to list specific examples instead of vague accusations like \"inadequately prepared\" and \"noticeable absence of demonstrations\".\n\n> disproportionate emphasis on paid websites over free alternatives\n\nThat is merely what you are paying attention to and triggered by. Most tools in the course are free or have free alternatives. Furthermore, I don't choose what's free, they are not my companies, so I am unsure why you think this is my fault...?" + }, + { + "review": "I have taken this course and have spent lot of hrs and can tell that, this 12 hrs course will take lot of time to understand and complete (150 hrs). This can be done if we have 100% free time for 4 weeks. Good thing is if you have little idea of Calculus and we flow what is being taught, this course will make us ready to understand ML. Having said that the instructor should again go back and try to minimize the un-necessary or repetitive content.", + "response": "There is no unnecessary or repetitive content, and if you believe there is, you should use the Q&A to clear up any misunderstandings as instructed in the \"how to succeed in this course\" video." + }, + { + "review": "Content is inconsistent in difficulty. Instructor is not good at explaining highly complicated topics that require a lot of Mathematics.", + "response": "Thanks for the feedback! 
Please ensure you meet the prerequisites as listed twice in the course description and several more times in lectures such as \"how to succeed in this course\". Please recall that each section is denoted as \"beginner\", \"intermediate\", or \"advanced\" (as stated in the intro). One must choose according to their own skill level. This obvious lack of attention explains why you are having trouble understanding the course (since clearly, even the basic info in the intro was beyond your understanding)." + }, + { + "review": "you are READING the slide nicely.", + "response": "Incorrect. The slides provide a summary of the spoken audio, like all courses on this site, all other video-based courses, and all presentations around the world. I'm amused that you would act \"surprised\" by such a normal occurrence... The slides are not being \"read\", and anyone taking the course can confirm this fact readily, so this comment is both inaccurate and misleading to potential students." + }, + { + "review": "There are some good topics explained, but lots of tutorials are just off topic (not about NLP), but various attempts from the instructor the explain to others why they're not able to understand stuff.", + "response": "You just have to remember that the course is not customized for you, it's for everyone. The \"FAQ\" section (meaning \"frequently asked questions\") answers questions other students are asking. It's unkind to suggest that I shouldn't answer questions from your fellow students." + }, + { + "review": "It doesn’t make sense to have to make an account outside of Udemy to then have to read an article that gives an analogy about \"burgers\" just to then inform you that you have to wait 31 days to view the material. Just upload the video like every other instructor and inform us that you have other material. Don’t force us to the material.", + "response": "Incorrect. You don't have to \"make an account outside of Udemy\" to take this course. That is for a different VERSION of the course for students who want to go above and beyond and learn more exciting material, in case they want to learn stuff OUTSIDE the course description [which I'm sure you've diligently read ;)]." + }, + { + "review": "I feel pretty lost, I feel like showing an example of what we're trying to achieve even thought it comes way later in the course would show how the individual parts of the course will play into it.", + "response": "This is what happens when you don't follow the instructions, like meeting the prerequisites or using the Q&A." + }, + { + "review": "Teacher is good at explaining concepts,albeit he has some language problems.", + "response": "English is my only language, but based on your comment alone, it is clear that the language problems may be on your end. I suggest improving in this area to better understand the course, thanks!" + }, + { + "review": "Honestly !! this is highly insufficient material. When we open the books of Machine Learning, we are lost in understanding the mathematical notations. However, this course is teaching Integration way below the levels of Class 12. My comment - It needs improvement", + "response": "Please make sure you read the course description so you understand the purpose of this course. For example, hard integration problems would not serve that purpose." + }, + { + "review": "The course has no depth where the instructor explains an intuition and runs off. The exercises given have no solutions and you have to do it either yourself or you suck and they can't help you! 
I wouldn't recommend it. Feels more like a refresher course than a course for someone to learn from scratch.", + "response": "> The course has no depth where the instructor explains an intuition and runs off.\n\nIncorrect. Every concept lecture is followed by Python code.\n\n> The exercises given have no solutions\n\nIncorrect. Exercise solutions are in the videos... please pay attention to improve your understanding, thanks." + }, + { + "review": "I feel helped by this course, but I am a bit confused about understanding the Markov model, but in other materials I can smoothly. Thank you for making this class, I hope you are always healthy.", + "response": "Please note that the sections are clearly marked beginner, intermediate, or advanced. This is so that you can stick to your level without being confused about material that is too advanced for you." + }, + { + "review": "Thank you for your prompt response. Let's be frank. I'm no novice to this topic, and I took your course hoping to get a fresh perspective. However, I was met with content that seemed hastily put together and felt more like a reference guide rather than a comprehensive educational course. I've previously enrolled in some of your courses, which were of higher quality. My feedback is based on a comparison with your own past materials. I hope you'll take this as an opportunity to review and enhance the course content for the benefit of future students.", + "response": "All algorithms are derived from scratch and based on the prerequisites, it is not a \"reference guide\". It seems strange that someone who is \"no novice to this topic\" would get those confused..." + }, + { + "review": "Too much talking, less content till now", + "response": "It's a video course, I'm not sure how one would avoid talking..." + }, + { + "review": "While this course has multiple sections on how LP believes you should be learning. That time could have been spent reinforcing some of the more difficult concepts with additional examples.", + "response": "This is a common misunderstanding of the appendix/FAQ. It's not \"That time could have been spent reinforcing some of the more difficult concepts with additional examples\". This content doesn't displace any other content." + }, + { + "review": "Therotical only.....No Example......just copy from book and paste it.......read it.....No Implementation...................", + "response": "Incorrect. Everything has been implemented from scratch. Please pay attention to improve your understanding, and please watch the FAQ lecture \"Beginner's Coding Tips\", thanks!" + }, + { + "review": "I haven't proceeded in the course yet but I wouldn't say I liked the instructor's stance on students asking dumb questions.", + "response": "That seems very weird. In the \"how to succeed\" lecture it clearly states that I encourage any and all questions. Why would you disagree with that?" + }, + { + "review": "If you hang out on YouTube probably you find the same information in the same time", + "response": "You can say that about any subject. The real question is, if it's so easy, then why haven't you done so? ;)" + }, + { + "review": "Generally useful but structure of content and direction of course is not always clear. We jump backwards and forwards between methods more than I would like.", + "response": "Each section is devoted to a different \"method\", there's no jumping \"back and forth\" between them..." 
+ }, + { + "review": "I do not think its advanced stuff at all, nevertheless its good.", + "response": "Read the course description to learn what this course is about. In addition, please see the FAQ, which answers questions such as whether this course is for beginners or experts." + }, + { + "review": "There is constant talk about not having to understand the theory but it seems like the maths goes hand in hand with the models so not sure if it is feasible to just learn the code without understanding why you do certain things", + "response": "Because you're not implementing any of that math yourself, only using high level libraries. Please pay more attention to improve your understanding, thanks!" + }, + { + "review": "not get any technical knowledge yet", + "response": "Why not read the course description so that you understand what this course is about?" + }, + { + "review": "I came here to learn industry level but it does not meet my expectations. this course suits you well for beginners because you can learn all the math and coding from scratch.", + "response": "You simply have an incorrect understanding about what constitutes \"industry level\" (hint: you are not at this level)." + }, + { + "review": "there could be hands-on session rather than pre written code . This would help in understanding the logic better.", + "response": "I've instructed you to code by yourself, not to peek at my prewritten solutions. Therefore, you have simply not followed the instructions. You claim to want to be \"hands-on\", yet you haven't even done the hands-on work I've prescribed." + }, + { + "review": "If you are new to AI, don't take this course. Find something else to start with. Most of what I got from this course is exposure to possibilities with recommender systems. It is not the most organized course either", + "response": "Why should you be new to AI? You should meet the prerequisites, as instructed. Furthermore, it's not \"exposure to possibilities\", we are implementing many concrete algorithms. If you're having trouble understanding how the course is organized, it's one algorithm per section. I suggest simply paying more attention. Thanks!" + }, + { + "review": "sometimes a little superficial", + "response": "Thanks for your feedback. Please make sure to READ the course description so you understand what this course is about before taking it, thanks!" + }, + { + "review": "I am an industry data scientist with an academic background in Machine learning, I have done several deep learning projects in my school years, I am taking this as a refresher. But equations don't have an explanation of variables, and what they stand for, the instructor repetitively mentions that if you don't know any equations you are not ready for this course but no one knows an equation, how it is derived, and what all the greek symbols mean right off the bat especially if you are away from macadamia for a few years. If you add additional resources(eg: citation as you should!) we can read and understand your variables, also many textbooks use different notations, so you need to make your description clear. Also, I don't like the tone and rudeness of the instructor. 
He sounds like a mad professor, this is a recorded video, so take away the anger", + "response": "> But equations don't have an explanation of variables, and what they stand for\n\nThere is literally a lecture titled \"What do all these symbols and letters mean?\", which is actually a review of previously taught explanations (in other words, variables have been defined multiple times in many cases). Perhaps the problem is that you're simply not paying attention...\n\nAt the very least, thank you for making it obvious to the readers here that your claims are unfounded.\n\n> Also, I don't like the tone and rudeness of the instructor.\n\nYes, I know some students don't like being corrected (as above) and construe all corrections as rude because it's impossible for them to be wrong. But how can a teacher do his job if every correction is interpreted as rude?" + }, + { + "review": "They can't send me slide for this course.", + "response": "Incorrect. Slides are available upon request. Simply use the Q&A as instructed in the how to succeed lecture." + }, + { + "review": "The instructor is poor. Read their responses to other negative reviews. Really off-putting. Constantly stating how people are INCORRECT showing the instructor clearly has no ability to take constructive criticism. If only the instructor could A/B test their own responses. Honestly thought it was a good-ish course. The instructor earned a 1-star here.", + "response": "No, \"incorrect\" is used to denote factually wrong statements. It appears you are too emotional, focusing on your feelings (you're offended that I've corrected others) instead of the facts." + }, + { + "review": "Please respect yourself, you must be ashamed of yourself because of this. Peyser", + "response": "Thanks for your feedback! It'd be great if you could provide specifics regarding what you didn't like about the course..." + }, + { + "review": "Mentor is providing slides for video and he is also fooling student to stay 31 on Udemy but after 31 days I am not finding any slides and note from instructor side.", + "response": "Incorrect. Comment speaks for itself really." + }, + { + "review": "This course kind of breezes over the topics, there are colabs used in the videos that we do not have access to and Section 4 is irrelevant to the course, I would rather see my on the topic and learn something than how to install tools that I already know how to do. Section 4 should actually be on his youtube site. There should also be more links to the tools he speaks of or uses, you have to stop the video and actually go google the tools and search for them. I am still trying to find ffmpegexamples.ipynb that is used for a whole segment of the training but there is no access to. Good course but needs a lot of fine-tuning to be better. I hope to see more added to create better content.", + "response": "> there are colabs used in the videos that we do not have access to\n\nIncorrect. Lecture 4 is called \"COURSE RESOURCES\". Any guesses about what this is for?\n\n> Section 4 is irrelevant to the course\n\nInteresting, who should decide what's relevant to the course? Instructor (who understands the course content) or student (who does not)?\n\n> I would rather see my on the topic\n\nThis is not even a coherent sentence.\n\n> There should also be more links to the tools he speaks of or uses, you have to stop the video and actually go google the tools and search for them\n\nAgain, no you do not. 
This is what happens when you don't pay attention.\n\n> I am still trying to find ffmpegexamples.ipynb\n\nIf you paid attention, you would have already found it.\n\n> Good course but needs a lot of fine-tuning to be better. I hope to see more added to create better content.\n\nNo, you just need to follow the instructions and use the Q&A to fix your misunderstandings. I don't see how it could be any simpler." + } +] diff --git a/openai/robots_playing_soccer.jpeg b/openai/robots_playing_soccer.jpeg new file mode 100644 index 00000000..547761ae Binary files /dev/null and b/openai/robots_playing_soccer.jpeg differ diff --git a/openai/webdesign.jpg b/openai/webdesign.jpg new file mode 100644 index 00000000..ecce702e Binary files /dev/null and b/openai/webdesign.jpg differ diff --git a/probability/WHERE ARE THE NOTEBOOKS.txt b/probability/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..5446ce25 --- /dev/null +++ b/probability/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,5 @@ +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. + +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/probability/extra_reading.txt b/probability/extra_reading.txt new file mode 100644 index 00000000..e2df1a0d --- /dev/null +++ b/probability/extra_reading.txt @@ -0,0 +1,2 @@ +Multivariate Change of Variables +https://math.libretexts.org/Bookshelves/Calculus/Book%3A_Active_Calculus_(Boelkins_et_al.)/11%3A_Multiple_Integrals/11.09%3A_Change_of_Variables \ No newline at end of file diff --git a/prophet/extra_reading.txt b/prophet/extra_reading.txt new file mode 100644 index 00000000..1e2ea58c --- /dev/null +++ b/prophet/extra_reading.txt @@ -0,0 +1,2 @@ +Forecasting at Scale (Facebook Prophet) +https://peerj.com/preprints/3190.pdf \ No newline at end of file diff --git a/pytorch/.gitignore b/pytorch/.gitignore new file mode 100644 index 00000000..f9187508 --- /dev/null +++ b/pytorch/.gitignore @@ -0,0 +1,3 @@ +*rl_trader_models +*rl_trader_rewards +*.png diff --git a/pytorch/WHERE ARE THE NOTEBOOKS.txt b/pytorch/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..5446ce25 --- /dev/null +++ b/pytorch/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,5 @@ +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. + +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. 
\ No newline at end of file diff --git a/pytorch/aapl_msi_sbux.csv b/pytorch/aapl_msi_sbux.csv new file mode 100644 index 00000000..cb98cb88 --- /dev/null +++ b/pytorch/aapl_msi_sbux.csv @@ -0,0 +1,1260 @@ +AAPL,MSI,SBUX +67.8542,60.3,28.185 +68.5614,60.9,28.07 +66.8428,60.83,28.13 +66.7156,60.81,27.915 +66.6556,61.12,27.775 +65.7371,61.43,27.17 +65.7128,62.03,27.225 +64.1214,61.26,26.655 +63.7228,60.88,26.675 +64.4014,61.9,27.085 +63.2571,60.28,26.605 +64.1385,60.63,26.64 +63.5099,62.09,27.285 +63.0571,62.21,27.425 +61.4957,62.03,27.435 +60.0071,62.5,27.85 +61.5919,62.97,28.255 +60.8088,63.11,28.55 +61.5117,62.64,29.125 +61.6742,62.75,29.335 +62.5528,62.56,29.305 +61.2042,62.13,29.14 +61.1928,62.22,29.2925 +61.7857,62.34,28.84 +63.3799,62.07,28.83 +65.1028,61.64,28.465 +64.9271,61.67,28.415 +64.5828,62.4,28.715 +64.6756,62.43,28.525 +65.9871,63.61,28.69 +66.2256,63.29,28.345 +65.8765,63.46,28.525 +64.5828,63.56,28.455 +63.2371,64.03,28.475 +61.2728,63.7,28.435 +61.3988,63.7,29.13 +61.7128,62.8,28.85 +61.1028,62.99,29.055 +60.4571,62.67,28.9 +60.8871,63.17,29.06 +60.9971,63.64,28.705 +62.2414,64.69,28.9 +62.0471,64.63,29.2875 +61.3999,63.87,29.545 +59.9785,61.83,28.855 +60.8914,62.96,29.28 +57.5428,62.13,29.085 +56.0071,61.15,28.86 +55.7899,61.72,29.2025 +56.9528,61.78,29.32 +58.0185,61.75,29.695 +57.9231,56.02,29.915 +58.3399,56.39,30.25 +59.6007,56.8,30.0 +61.4457,57.44,30.29 +63.2542,57.2,30.42 +62.7557,56.37,30.07 +63.6457,56.89,30.19 +64.2828,57.29,30.935 +65.8156,56.95,31.24 +65.5225,56.79,31.095 +66.2628,57.0,31.205 +65.2528,56.78,31.18 +64.7099,56.48,31.5485 +64.9628,56.17,31.41 +63.4085,56.89,31.76 +61.2642,57.1,32.035 +62.0825,57.53,31.775 +61.8942,57.84,32.065 +63.2757,58.25,31.915 +62.8085,57.77,32.125 +63.0505,57.3,32.075 +63.1628,57.48,31.76 +63.5928,57.81,31.68 +63.0627,58.53,32.13 +63.5642,58.32,31.815 +64.5114,58.54,31.735 +64.2478,57.96,31.57 +64.3885,57.83,31.73 +64.1871,57.41,31.665 +63.5871,56.27,31.17 +62.6371,56.92,31.51 +63.1158,56.94,32.52 +62.6985,56.61,33.055 +62.5142,56.38,32.71 +61.7414,56.26,32.225 +62.2807,57.19,32.985 +61.4357,56.93,32.8 +61.7142,57.33,33.015 +61.6814,57.35,33.5475 +60.4285,56.78,33.205 +59.5482,55.5,32.61 +59.0714,55.82,32.345 +57.5057,55.59,32.005 +57.5185,56.35,32.37 +56.8671,57.49,32.9 +56.2542,57.84,32.845 +56.6471,57.73,32.755 +58.4599,57.98,33.12 +59.7842,57.49,33.395 +60.1142,57.26,33.65 +59.6314,57.93,33.86 +59.2928,57.86,34.145 +60.3357,58.03,34.065 +60.1042,58.43,34.05 +61.0411,59.05,34.67 +60.9299,59.54,34.86 +61.0628,59.17,34.83 +61.4564,59.32,34.76 +61.4728,59.42,34.1 +61.6797,59.36,34.24 +60.7071,59.85,34.395 +60.9014,59.87,34.51 +59.8557,59.98,33.83 +62.9299,56.04,33.305 +62.6428,54.25,34.085 +62.9985,54.26,36.68 +63.9699,54.01,36.225 +64.7599,54.35,35.965 +64.6471,54.83,35.6445 +65.2394,55.32,36.74 +66.0771,56.02,37.115 +67.0642,56.1,36.985 +66.4642,56.4,36.4 +66.4256,56.48,36.095 +65.8585,57.13,36.47 +64.9214,57.36,36.4 +66.7656,57.44,36.465 +69.9385,57.84,36.32 +71.2142,57.71,35.925 +71.1299,56.96,35.37 +71.7614,57.15,35.355 +72.5342,57.09,35.145 +71.5814,57.05,35.33 +71.7656,56.06,35.3565 +71.8514,56.33,35.95 +71.5742,56.74,35.985 +71.8528,56.55,35.94 +69.7985,56.12,35.08 +70.1279,56.39,35.48 +70.2428,56.19,35.59 +69.6022,56.01,35.26 +69.7971,56.28,35.8 +71.2415,56.08,36.07 +70.7528,56.17,36.025 +71.1742,56.47,35.785 +72.3099,57.59,36.22 +70.6628,57.37,37.1075 +66.8156,57.25,37.695 +67.5271,57.5,37.835 +66.4142,57.46,37.785 +64.3028,57.81,37.62 +65.0456,58.28,38.02 +66.3828,59.26,38.665 +67.4714,59.69,38.175 
+66.7728,60.39,38.06 +70.0914,60.37,37.68 +69.8714,59.99,38.275 +68.7899,59.85,38.17 +69.4599,59.87,38.59 +68.9642,59.75,38.665 +68.1071,59.38,38.485 +69.7085,60.89,38.58 +69.9371,60.7,38.595 +69.0585,60.56,38.435 +69.0042,61.14,38.7 +69.6785,60.89,38.4305 +68.7056,59.62,37.765 +69.5125,59.39,37.63 +69.9482,60.61,38.56 +70.4016,60.52,38.91 +70.8628,61.03,39.05 +71.2399,60.49,38.355 +71.5876,60.71,39.02 +72.0714,60.92,39.3675 +72.6985,60.81,39.655 +74.4802,61.18,39.73 +74.2667,60.43,40.45 +74.9942,62.4,40.025 +75.9871,62.51,39.525 +75.1368,62.99,39.98 +75.6965,62.44,39.355 +73.8111,62.73,39.81 +74.9851,62.25,40.415 +74.6716,62.52,40.525 +74.2899,62.39,40.185 +75.2499,62.71,40.185 +75.0641,62.68,40.995 +74.4171,62.65,40.565 +73.2131,62.49,39.535 +74.3656,63.12,40.6 +74.1496,63.51,40.495 +74.2871,64.24,40.3075 +74.3762,64.45,40.7305 +75.4514,64.58,40.57 +74.9986,65.57,40.595 +74.0898,65.42,40.27 +74.2214,64.61,39.96 +73.5714,64.58,39.845 +74.4479,65.41,40.765 +74.2571,65.88,40.675 +74.8199,65.79,40.355 +76.1999,65.57,40.755 +77.9942,65.5,40.81 +79.4385,65.88,40.73 +78.7471,65.66,40.535 +80.9031,65.79,40.275 +80.7142,64.93,39.75 +81.1286,65.23,39.86 +80.0028,66.18,39.97 +80.9185,65.79,39.865 +80.7928,65.41,38.69 +80.1942,64.6,38.2 +80.0771,64.86,38.24 +79.2042,65.05,38.175 +79.6428,65.36,38.23 +79.2842,65.52,38.045 +78.6813,66.16,38.84 +77.7799,65.85,38.575 +78.4314,65.61,38.83 +81.4413,66.78,39.16 +81.0956,67.1,39.285 +80.5571,67.18,39.44 +80.0128,67.33,39.285 +79.2171,67.25,39.275 +80.1456,67.5,39.195 +79.0185,66.33,38.585 +77.2828,66.2,38.475 +77.7042,65.92,38.085 +77.1481,66.19,38.605 +77.6371,65.99,39.015 +76.6455,66.5,38.8 +76.1342,66.15,38.835 +76.5328,65.49,37.56 +78.0556,66.35,37.73 +79.6228,65.62,38.095 +79.1785,65.81,37.645 +77.2385,66.1,37.45 +78.4385,67.11,36.825 +78.7871,64.51,36.8 +79.4542,65.34,36.695 +78.0099,64.42,37.49 +78.6428,64.43,37.105 +72.3571,64.34,36.945 +71.5356,63.98,35.78 +71.3974,64.91,35.955 +71.5142,63.8,35.56 +71.6471,62.72,34.485 +72.6842,62.99,35.325 +73.2271,62.89,35.245 +73.2156,63.4,36.18 +74.2399,64.6,37.0175 +75.5699,65.08,37.4 +76.5656,65.03,37.25 +76.5599,65.78,36.955 +77.7756,65.67,37.345 +77.7128,65.61,37.515 +77.9985,65.78,36.985 +76.7671,64.93,36.66 +75.8785,65.22,36.775 +75.0356,65.02,36.28 +75.3642,64.96,36.28 +74.5799,65.1,35.275 +73.9071,65.45,35.89 +75.3814,65.9,36.095 +75.1771,66.2,35.48 +75.3942,65.98,35.235 +75.8914,66.76,35.83 +76.0514,66.33,35.65 +75.8214,66.57,36.345 +75.7771,66.64,36.535 +75.8456,66.43,36.78 +76.5842,66.08,37.515 +76.6585,65.02,37.815 +75.8071,64.21,37.215 +74.9556,63.67,37.135 +75.2485,65.08,37.09 +75.9142,65.72,37.3 +75.8942,65.7,37.955 +75.5285,66.66,38.4775 +76.1242,66.81,38.355 +77.0271,66.05,37.885 +77.8556,66.18,37.305 +77.1114,65.16,36.77 +76.7799,64.36,36.7 +76.6942,64.3,36.85 +76.6771,64.29,36.69 +77.3785,64.91,37.005 +77.5071,65.1,36.835 +76.9699,65.09,36.545 +75.9742,64.26,35.775 +74.7814,64.43,35.215 +74.7771,64.95,35.74 +75.7599,65.26,36.24 +74.7828,63.99,35.11 +74.2299,63.39,34.365 +74.5256,63.78,34.655 +73.9942,63.37,34.445 +74.1442,63.23,35.395 +74.9914,63.15,35.075 +75.8814,62.51,35.24 +75.9569,63.27,35.5745 +74.9642,63.29,35.195 +81.1099,63.0,35.545 +81.7056,62.5,35.725 +84.8699,62.64,35.465 +84.6185,63.43,35.32 +84.2985,63.58,35.31 +84.4971,62.65,35.56 +84.6542,65.51,35.3 +85.8513,66.15,35.46 +84.9156,66.4,34.79 +84.6185,67.14,34.87 +83.9985,67.38,34.79 +83.6488,67.26,35.145 +84.6899,67.8,35.575 +84.8228,67.75,35.58 +84.8385,67.2,35.085 +84.1171,66.34,34.925 +85.3585,66.3,35.47 +86.3699,66.88,35.51 
+86.3871,66.52,35.115 +86.6156,66.89,35.2 +86.7528,66.63,35.7 +87.7328,67.0,35.99 +89.3756,67.02,36.83 +89.1442,66.93,36.635 +90.7685,66.91,36.555 +90.4285,67.42,36.62 +89.8071,67.4,36.925 +91.0771,66.86,37.09 +92.1171,67.23,37.335 +92.4785,67.17,37.36 +92.2242,67.66,37.665 +93.7,67.67,37.59 +94.25,67.7,37.3 +93.86,66.93,37.4 +92.29,66.46,36.98 +91.28,66.78,37.345 +92.2,66.72,37.545 +92.08,66.64,37.655 +92.18,66.62,37.78 +91.86,67.06,38.615 +90.91,67.07,38.3 +90.83,67.1,38.365 +90.28,66.73,38.715 +90.36,66.55,39.06 +90.9,66.56,39.03 +91.98,66.78,38.97 +92.93,66.57,38.69 +93.52,66.96,39.04 +93.48,67.02,39.095 +94.03,67.41,39.53 +95.96799999999999,67.24,39.345 +95.35,66.27,39.28 +95.39,66.58,39.725 +95.035,66.45,39.425 +95.22,66.0,39.3 +96.45,66.08,39.28 +95.32,65.49,39.445 +94.78,65.67,39.365 +93.0899,64.94,38.62 +94.43,65.49,38.97 +93.939,65.74,38.805 +94.72,66.05,39.37 +97.19,65.77,39.57 +97.03,65.61,40.225 +97.671,65.0,39.37 +99.02,65.21,39.18 +98.38,64.74,39.325 +98.15,64.83,39.45 +95.6,63.68,38.84 +96.13,64.11,38.49 +95.59,64.11,38.765 +95.12,61.39,38.395 +94.96,61.21,38.565 +94.48,61.25,38.355 +94.74,62.19,38.81 +95.99,61.73,38.935 +95.97,61.64,38.91 +97.24,62.03,38.62 +97.5,61.52,38.31 +97.98,61.0,38.455 +99.16,60.81,38.795 +100.53,61.37,39.06 +100.57,61.64,39.015 +100.58,61.7,38.735 +101.32,61.23,38.64 +101.54,61.02,38.985 +100.889,60.3,38.895 +102.13,59.68,38.96 +102.25,59.37,38.905 +102.5,59.4,38.905 +103.3,59.01,38.74 +98.94,58.94,38.395 +98.12,58.98,38.58 +98.97,58.89,38.975 +98.36,61.02,38.835 +97.99,61.08,38.56 +101.0,61.22,38.605 +101.43,61.22,38.06 +101.66,61.54,37.735 +101.63,61.42,37.46 +100.86,61.69,37.545 +101.58,61.91,37.67 +101.79,62.04,37.865 +100.96,61.88,38.035 +101.06,61.68,37.3 +102.64,61.57,36.9775 +101.75,61.8,37.66 +97.87,62.24,37.06 +100.75,63.42,37.585 +100.11,63.18,37.635 +100.75,63.28,37.73 +99.18,62.34,37.305 +99.9,61.03,37.225 +99.62,61.3,37.945 +99.62,61.5,37.5725 +98.75,60.47,37.025 +100.8,61.58,37.63 +101.02,60.46,37.24 +100.73,59.05,37.23 +99.81,58.5,36.095 +98.75,58.73,36.37 +97.54,59.32,36.19 +96.26,59.18,36.32 +97.67,60.79,36.77 +99.76,61.25,37.35 +102.47,62.39,37.18 +102.99,61.63,37.3 +104.83,62.25,37.42 +105.22,62.57,37.905 +105.11,62.8,37.985 +106.74,64.06,38.525 +107.34,63.94,38.27 +106.98,63.7,38.66 +108.0,64.5,37.78 +109.4,64.68,38.05 +108.6,66.76,38.355 +108.86,64.46,38.33 +108.7,63.42,38.725 +109.01,64.14,38.895 +108.83,63.94,38.825 +109.7,63.55,38.865 +111.25,63.7,38.925 +112.82,64.43,38.945 +114.18,65.25,39.06 +113.99,65.4,38.915 +115.47,66.0,38.785 +114.67,65.94,38.91 +116.31,65.66,39.1 +116.47,65.27,39.88 +118.625,65.81,40.26 +117.6,65.6,40.105 +119.0,65.56,39.85 +118.93,65.72,40.605 +115.07,65.44,40.425 +114.63,65.51,40.185 +115.93,65.32,40.235 +115.49,65.2,40.655 +115.0,65.0,41.785 +112.4,65.27,41.9 +114.12,65.29,41.515 +111.95,63.52,41.33 +111.62,63.29,41.56 +109.73,62.31,41.625 +108.225,61.91,40.445 +106.745,61.73,39.565 +109.41,63.99,40.2175 +112.65,65.11,40.015 +111.78,65.5,39.72 +112.94,66.53,40.27 +112.54,66.93,40.715 +112.01,67.34,40.635 +113.99,67.49,40.915 +113.91,67.87,41.19 +112.52,67.53,40.895 +110.38,67.08,41.025 +109.33,66.51,40.72 +106.25,65.06,39.94 +106.26,64.51,39.615 +107.75,64.43,40.59 +111.89,65.43,41.245 +112.01,65.11,39.895 +109.25,64.35,40.115 +110.22,64.11,40.435 +109.8,63.76,40.21 +106.82,63.41,39.79 +105.99,64.05,40.305 +108.72,64.02,40.6125 +109.55,64.31,40.645 +112.4,65.36,41.37 +112.98,65.48,44.11 +113.1,65.71,44.06 +109.14,64.94,44.17 +115.31,63.84,43.7825 +118.9,63.83,44.525 +117.16,62.41,43.765 
+118.63,62.81,43.995 +118.65,64.01,44.245 +119.56,63.94,44.35 +119.94,64.0,44.82 +118.93,64.66,44.5 +119.72,67.78,44.41 +122.02,68.22,45.59 +124.88,68.57,45.395 +126.46,70.0,45.9125 +127.08,69.91,45.79 +127.83,69.79,46.015 +128.715,69.12,46.5 +128.45,69.03,46.585 +129.495,69.83,46.755 +133.0,68.63,46.79 +132.17,68.53,46.725 +128.79,68.02,47.13 +130.415,68.47,47.275 +128.46,67.94,46.7425 +129.09,68.89,47.1125 +129.36,68.14,47.0 +128.54,67.64,46.53 +126.41,67.93,46.815 +126.6,66.82,46.1075 +127.14,66.57,46.52 +124.51,65.33,46.09 +122.24,65.31,45.71 +124.45,64.96,46.69 +123.59,64.8,46.645 +124.95,65.86,47.0225 +127.04,65.32,47.1925 +128.47,66.65,47.92 +127.495,66.34,48.88 +125.9,66.83,48.73 +127.21,66.52,48.685 +126.69,66.23,48.9575 +123.38,65.35,47.885 +124.24,65.42,47.54 +123.25,65.38,47.535 +126.37,66.39,47.99 +124.43,66.67,47.35 +124.25,66.67,46.51 +125.32,62.51,47.195 +127.35,61.48,47.26 +126.01,61.99,47.035 +125.6,62.42,47.615 +126.56,62.32,47.96 +127.1,62.53,48.17 +126.85,61.97,48.5 +126.3,61.91,48.3 +126.78,61.82,48.14 +126.17,61.86,48.245 +124.75,60.68,47.62 +127.6,61.16,47.97 +126.91,61.43,48.37 +128.62,61.59,48.335 +129.67,60.84,49.43 +130.28,60.57,51.84 +132.65,60.98,50.87 +130.56,60.69,50.61 +128.64,59.74,50.65 +125.15,59.75,49.58 +128.95,60.28,50.29 +128.7,60.68,50.445 +125.8,58.59,49.405 +125.01,58.75,48.93 +125.26,60.01,49.35 +127.62,60.59,49.78 +126.32,59.8,49.5 +125.865,59.42,49.71 +126.01,59.25,49.59 +128.95,59.79,50.555 +128.77,59.3,50.8 +130.19,60.12,51.18 +130.07,59.8,51.42 +130.06,59.8,51.03 +131.39,59.79,51.33 +132.54,59.66,51.48 +129.62,59.11,50.84 +132.045,59.06,51.59 +131.78,59.63,51.81 +130.28,59.0,51.96 +130.535,59.65,52.22 +129.96,59.19,51.73 +130.12,59.48,52.12 +129.36,58.8,51.72 +128.65,58.61,52.19 +127.8,58.08,51.53 +127.42,57.9,51.54 +128.88,58.49,52.69 +128.59,58.55,52.49 +127.17,57.65,52.63 +126.92,57.95,52.27 +127.6,58.18,52.965 +127.3,57.97,53.24 +127.88,58.39,54.11 +126.6,58.05,53.93 +127.61,59.22,53.9 +127.03,59.12,54.115 +128.11,58.29,53.71 +127.5,58.35,54.07 +126.75,58.38,54.62 +124.53,57.14,53.55 +125.425,57.34,53.615 +126.6,57.6,53.89 +126.44,57.51,54.24 +126.0,57.22,54.305 +125.69,57.49,54.375 +122.57,56.79,53.39 +120.07,56.94,54.05 +123.28,57.48,54.57 +125.66,58.43,55.7 +125.61,58.6,55.75 +126.82,58.89,55.34 +128.51,59.29,55.74 +129.62,58.85,55.69 +132.07,59.4,56.21 +130.75,59.57,56.2 +125.22,59.35,56.69 +125.16,58.85,56.56 +124.5,59.5,57.29 +122.77,58.71,56.98 +123.38,59.11,57.14 +122.99,59.58,57.51 +122.37,59.86,58.06 +121.3,60.16,57.93 +118.44,59.76,58.19 +114.64,60.22,58.7 +115.4,64.04,59.01 +115.13,63.8,57.23 +115.52,64.19,57.2 +119.72,63.99,56.27 +113.49,63.35,56.35 +115.24,64.6,56.38 +115.15,64.34,56.85 +115.96,64.98,57.1 +117.16,65.27,57.74 +116.5,65.77,57.83 +115.01,65.35,57.59 +112.65,63.89,55.81 +105.76,62.45,52.84 +103.12,60.79,50.34 +103.74,60.44,51.09 +109.69,63.14,53.96 +112.92,64.29,55.95 +113.29,64.55,55.63 +112.76,64.82,54.71 +107.72,63.85,53.5 +112.34,64.72,55.26 +110.37,65.11,54.69 +109.27,66.31,54.28 +112.31,69.61,55.21 +110.15,68.3,54.69 +112.57,69.09,55.37 +114.21,67.08,56.53 +115.31,66.84,56.29 +116.28,67.15,56.91 +116.41,67.47,57.26 +113.92,67.03,57.28 +113.45,67.09,56.84 +115.21,67.05,57.54 +113.4,66.58,57.12 +114.32,67.8,57.79 +115.0,67.91,58.37 +114.71,69.2,57.99 +112.44,67.93,55.77 +109.06,67.45,55.72 +110.3,68.38,56.84 +109.58,67.76,57.48 +110.38,68.4,58.08 +110.78,69.75,59.04 +111.31,69.19,58.69 +110.78,69.79,58.78 +109.5,69.5,59.46 +112.12,68.78,60.07 +111.6,69.43,60.54 +111.79,69.04,60.16 +110.21,68.7,58.82 
+111.86,69.27,59.69 +111.04,69.26,59.93 +111.73,69.03,60.97 +113.77,69.48,60.88 +113.76,69.47,60.53 +115.5,70.48,61.49 +119.08,70.48,62.61 +115.28,70.05,63.43 +114.55,69.96,62.71 +119.27,70.37,63.51 +120.53,70.13,62.5 +119.5,69.97,62.57 +121.18,70.73,62.24 +122.57,71.36,62.8 +122.0,65.24,61.96 +120.92,67.4,62.28 +121.06,68.01,61.97 +120.57,68.2,61.34 +116.77,68.34,62.18 +116.11,70.02,61.87 +115.72,69.44,61.07 +112.34,69.03,59.74 +114.175,70.02,60.68 +113.69,71.05,60.55 +117.29,71.98,61.8 +118.78,72.45,61.46 +119.3,72.19,61.99 +117.75,72.24,62.64 +118.88,71.96,61.96 +118.03,71.83,62.19 +117.81,72.02,62.18 +118.3,71.78,61.39 +117.34,72.05,61.37 +116.28,71.89,61.22 +115.2,71.08,59.55 +119.03,72.11,61.75 +118.28,70.38,61.89 +118.23,69.75,62.16 +115.62,69.31,61.18 +116.17,69.37,61.87 +113.18,68.61,59.82 +112.48,68.14,59.92 +110.49,69.13,59.98 +111.34,69.52,60.35 +108.98,68.56,59.515 +106.03,67.58,58.62 +107.33,68.03,59.54 +107.23,68.87,59.99 +108.61,69.21,60.34 +108.03,69.06,60.32 +106.82,69.18,60.19 +108.74,69.64,61.13 +107.32,69.3,60.82 +105.26,68.45,60.03 +105.35,67.13,58.26 +102.71,66.39,58.65 +100.7,65.43,58.13 +96.45,64.11,56.69 +96.96,64.25,56.63 +98.53,64.37,57.82 +99.96,64.91,59.46 +97.39,63.37,57.87 +99.52,63.11,58.98 +97.13,61.59,58.0 +96.66,61.13,58.55 +96.79,60.36,56.92 +96.3,60.82,59.03 +101.42,62.04,59.17 +99.44,62.42,57.71 +99.99,63.16,58.61 +93.42,64.8,57.63 +94.09,64.74,59.285 +97.34,66.77,60.77 +96.43,66.85,61.4 +94.48,64.32,60.695 +96.35,64.88,59.53 +96.6,64.25,58.29 +94.02,62.82,54.49 +95.01,62.09,54.14 +94.99,62.24,54.42 +94.27,60.97,55.14 +93.7,60.52,54.92 +93.99,61.78,55.86 +96.64,63.42,56.41 +98.12,65.05,57.63 +96.26,64.78,56.96 +96.04,66.0,57.67 +96.88,66.75,58.87 +94.69,70.78,58.46 +96.1,72.84,58.11 +96.76,74.06,58.75 +96.91,74.86,58.34 +96.69,73.49,58.21 +100.53,71.19,60.04 +100.75,71.28,59.56 +101.5,71.25,59.04 +103.01,70.95,58.7 +101.87,71.01,58.0 +101.03,71.1,57.6 +101.12,71.48,57.07 +101.17,71.22,57.52 +102.26,71.2,57.59 +102.52,71.83,58.65 +104.58,71.97,59.08 +105.97,72.24,59.67 +105.8,72.83,59.55 +105.92,72.59,59.7 +105.91,73.12,59.1 +106.72,73.71,59.38 +106.13,73.15,58.83 +105.67,72.59,58.36 +105.19,73.37,58.96 +107.68,74.09,59.55 +109.56,74.89,60.01 +108.99,75.7,59.7 +109.99,76.11,61.02 +111.12,76.32,60.25 +109.81,75.71,60.04 +110.96,76.09,60.83 +108.54,74.99,61.17 +108.66,75.24,61.04 +109.02,74.88,60.9 +110.44,75.04,59.5 +112.04,75.37,60.21 +112.1,75.31,60.13 +109.85,75.64,60.51 +107.48,75.69,60.89 +106.91,75.97,60.9 +107.13,75.55,60.9 +105.97,74.99,60.64 +105.68,75.56,57.68 +105.08,75.51,57.77 +104.35,75.9,57.72 +97.82,76.04,56.9 +94.83,75.34,56.42 +93.74,75.19,56.23 +93.64,76.0,57.36 +95.18,74.96,56.25 +94.19,74.22,56.39 +93.24,74.25,56.25 +92.72,70.54,56.31 +92.79,70.82,56.64 +93.42,71.05,57.49 +92.51,70.07,56.23 +90.34,71.11,56.3 +90.52,70.62,55.82 +93.88,70.83,55.53 +93.49,69.89,54.88 +94.56,69.46,54.8 +94.2,68.72,54.55 +95.22,68.75,54.62 +96.43,68.78,54.6 +97.9,69.68,55.44 +99.62,69.35,55.15 +100.41,69.4,55.29 +100.35,69.5,55.15 +99.86,69.27,54.89 +98.46,69.06,54.82 +97.72,68.8,54.62 +97.92,68.47,54.61 +98.63,68.77,55.59 +99.03,68.16,55.3 +98.94,69.05,55.22 +99.65,68.56,55.58 +98.83,67.45,54.865 +97.34,66.82,55.04 +97.46,67.24,55.57 +97.14,67.54,55.35 +97.55,67.8,55.53 +95.33,67.33,55.31 +95.1,68.35,55.38 +95.91,67.81,55.81 +95.55,67.43,55.61 +96.1,68.01,56.13 +93.4,64.73,54.68 +92.04,63.08,53.69 +93.59,63.69,54.85 +94.4,64.55,56.74 +95.6,65.97,57.12 +95.89,66.01,56.99 +94.99,64.77,56.77 +95.53,65.3,56.75 +95.94,65.05,56.91 +96.68,66.38,56.51 
+96.98,66.62,56.32 +97.42,67.4,57.48 +96.87,67.46,56.48 +98.79,67.58,57.59 +98.78,67.4,57.41 +99.83,67.55,56.92 +99.87,67.5,56.76 +99.96,67.93,57.54 +99.43,67.55,57.6 +98.66,68.25,57.9 +97.34,68.09,57.95 +96.67,68.42,58.31 +102.95,69.26,57.85 +104.34,69.58,58.21 +104.21,69.38,58.05 +106.05,69.63,57.63 +104.48,68.84,56.73 +105.79,69.29,55.94 +105.87,70.24,55.42 +107.48,73.5,55.9 +108.37,73.93,55.36 +108.81,74.28,55.2 +108.0,74.28,55.62 +107.93,75.52,55.47 +108.18,74.54,55.47 +109.48,75.44,55.25 +109.38,75.58,55.37 +109.22,75.68,55.8 +109.08,75.99,55.53 +109.36,76.34,54.94 +108.51,76.49,55.85 +108.85,76.99,56.4 +108.03,77.12,57.09 +107.57,77.18,57.29 +106.94,77.2,57.29 +106.82,77.29,56.8 +106.0,77.51,56.4 +106.1,76.99,56.23 +106.73,76.8,56.31 +107.73,77.95,56.18 +107.7,78.32,56.02 +108.36,78.08,56.32 +105.52,77.37,55.3 +103.13,76.65,54.35 +105.44,77.23,54.71 +107.95,76.09,53.98 +111.77,75.47,53.9 +115.57,76.04,54.11 +114.92,75.63,53.74 +113.58,75.76,53.01 +113.57,75.21,53.3 +113.55,75.73,53.98 +114.62,76.19,54.39 +112.71,76.11,54.43 +112.88,75.95,54.04 +113.09,76.32,54.19 +113.95,76.79,53.98 +112.18,77.21,53.45 +113.05,76.28,54.14 +112.52,75.25,53.84 +113.0,74.42,53.53 +113.05,74.35,53.35 +113.89,74.64,53.14 +114.06,74.48,53.46 +116.05,74.67,53.3 +116.3,73.5,52.92 +117.34,73.76,53.16 +116.98,73.06,52.95 +117.63,73.58,53.08 +117.55,73.13,52.76 +117.47,73.8,52.61 +117.12,73.8,53.15 +117.06,73.57,53.59 +116.6,73.62,53.63 +117.65,74.49,54.18 +118.25,74.16,53.67 +115.59,73.58,53.63 +114.48,73.48,53.59 +113.72,72.83,53.53 +113.54,72.58,53.07 +111.49,72.32,52.5 +111.59,71.57,52.98 +109.83,71.29,51.77 +108.84,75.9,52.75 +110.41,77.71,54.49 +111.06,78.56,54.62 +110.88,78.96,54.58 +107.79,79.19,53.57 +108.43,80.38,53.93 +105.71,80.6,54.22 +107.11,81.8,54.59 +109.99,80.51,55.44 +109.95,80.35,55.85 +110.06,79.98,55.77 +111.73,79.83,56.1 +111.8,80.31,57.12 +111.23,80.26,57.59 +111.79,80.98,57.43 +111.57,80.86,57.59 +111.46,81.11,58.17 +110.52,80.25,57.97 +109.49,79.19,58.51 +109.9,79.5,57.21 +109.11,80.92,57.5 +109.95,82.22,57.44 +111.03,83.27,58.76 +112.12,83.3,58.65 +113.95,82.79,58.75 +113.3,82.6,58.77 +115.19,83.24,59.31 +115.19,82.9,58.75 +115.82,83.46,57.71 +115.97,83.4,57.66 +116.64,83.93,57.65 +116.95,83.76,57.7 +117.06,84.0,57.44 +116.29,83.72,57.11 +116.52,83.41,57.01 +117.26,83.52,56.86 +116.76,82.86,56.35 +116.73,82.87,56.32 +115.82,82.89,55.52 +116.15,83.6,55.35 +116.02,83.49,55.99 +116.61,82.64,56.46 +117.91,82.89,57.13 +118.99,83.02,58.2 +119.11,82.63,57.88 +119.75,82.88,58.1 +119.25,82.18,58.03 +119.04,82.27,57.85 +120.0,80.73,58.0 +119.99,81.65,58.45 +119.78,81.86,57.89 +120.0,82.36,57.66 +120.08,82.44,57.76 +119.97,84.35,58.44 +121.88,85.29,58.7 +121.94,83.36,58.46 +121.95,82.98,56.12 +121.63,81.7,55.9 +121.35,80.71,55.22 +128.75,80.03,53.9 +128.53,81.0,53.87 +129.08,81.6,55.06 +130.29,81.73,55.73 +131.53,77.34,55.24 +132.04,78.25,55.22 +132.42,77.81,55.81 +132.12,78.37,56.22 +133.29,78.48,56.11 +135.02,78.68,56.58 +135.51,79.4,56.86 +135.345,78.66,56.73 +135.72,79.31,57.35 +136.7,80.15,57.54 +137.11,79.65,57.57 +136.53,79.36,57.64 +136.66,80.27,57.48 +136.93,79.28,56.78 +136.99,78.97,56.87 +139.79,79.98,57.14 +138.96,80.02,57.12 +139.78,80.55,57.1 +139.34,79.97,56.68 +139.52,79.66,56.2 +139.0,80.2,55.74 +138.68,81.37,55.19 +139.14,82.1,54.53 +139.2,81.65,54.63 +138.99,83.36,54.27 +140.46,85.24,54.54 +140.69,85.15,54.8 +139.99,84.72,55.78 +141.46,84.3,55.81 +139.84,83.76,55.54 +141.42,83.59,55.89 +140.92,83.74,55.85 +140.64,83.67,56.81 +140.88,84.0,57.23 +143.8,84.0,57.35 
+144.12,84.13,57.54 +143.93,84.87,58.16 +143.66,86.22,58.39 +143.7,84.83,58.44 +144.77,84.52,58.32 +144.02,83.83,58.22 +143.66,84.2,57.92 +143.34,84.25,58.02 +143.17,83.71,57.95 +141.63,83.45,57.88 +141.8,82.84,57.58 +141.05,82.34,57.51 +141.83,83.08,58.08 +141.2,82.64,58.35 +140.68,83.37,59.04 +142.44,84.1,60.08 +142.27,83.72,60.61 +143.64,84.72,61.11 +144.53,85.39,60.96 +143.68,85.38,61.56 +143.79,86.07,61.3 +143.65,85.97,60.06 +146.58,86.16,60.18 +147.51,85.92,60.5 +147.06,86.37,60.59 +146.53,86.1,60.83 +148.96,84.44,60.95 +153.01,83.59,60.94 +153.99,84.77,60.98 +153.26,85.77,60.66 +153.95,85.36,60.27 +156.1,84.21,59.93 +155.7,84.48,60.45 +155.47,83.7,59.98 +150.25,81.85,59.73 +152.54,80.83,59.82 +153.06,80.83,61.36 +153.99,82.93,61.23 +153.8,82.11,61.15 +153.34,82.22,61.89 +153.87,82.27,62.9 +153.61,81.86,63.3 +153.67,82.83,63.26 +152.76,83.57,63.61 +153.18,85.64,63.75 +155.45,86.62,64.57 +153.93,87.31,64.27 +154.45,87.46,64.16 +155.37,86.18,63.5 +154.99,86.65,62.24 +148.98,86.17,62.19 +145.42,86.11,61.29 +146.59,86.04,60.92 +145.16,84.87,60.27 +144.29,84.45,60.09 +142.27,84.72,60.14 +146.34,86.2,60.9 +145.01,85.74,59.86 +145.87,86.24,59.96 +145.63,87.36,59.51 +146.28,88.64,59.81 +145.82,88.42,59.64 +143.73,87.72,58.96 +145.83,88.13,59.18 +143.68,86.8,58.36 +144.02,86.74,58.31 +143.5,86.68,58.25 +144.09,86.83,57.94 +142.73,85.96,57.6 +144.18,87.31,58.04 +145.06,87.23,57.81 +145.53,87.65,57.9 +145.74,88.65,58.54 +147.77,88.33,58.38 +149.04,88.61,58.76 +149.56,88.28,58.33 +150.08,88.45,58.21 +151.02,89.78,58.11 +150.34,89.96,58.03 +150.27,90.52,57.98 +152.09,90.67,58.02 +152.74,91.39,58.55 +153.46,91.84,57.94 +150.56,92.21,59.5 +149.5,91.01,54.0 +148.73,90.68,53.98 +158.59,90.43,54.73 +157.14,90.43,55.43 +155.57,90.4,55.68 +156.39,90.37,55.44 +158.81,89.2,55.63 +160.08,88.58,54.52 +161.06,88.51,53.74 +155.32,86.99,53.07 +157.48,87.48,53.18 +159.85,88.6,53.22 +161.6,87.92,53.15 +160.95,88.19,53.5 +157.86,87.13,53.04 +157.5,87.37,52.7 +157.21,87.2,53.15 +159.78,86.41,54.45 +159.98,86.21,54.08 +159.27,86.51,53.94 +159.86,86.88,54.36 +161.47,87.3,54.4 +162.91,86.94,54.1 +163.35,87.66,54.52 +164.0,88.12,54.86 +164.05,87.87,54.93 +162.08,86.66,55.13 +161.91,85.72,54.31 +161.26,86.35,53.47 +158.63,85.12,53.49 +161.5,87.01,54.02 +160.86,87.74,53.54 +159.65,85.97,54.29 +158.28,84.58,54.53 +159.88,85.48,54.67 +158.67,85.48,54.69 +158.73,85.84,54.62 +156.07,85.65,55.15 +153.39,84.99,55.01 +151.89,84.29,55.09 +150.55,83.5,54.95 +153.14,83.02,55.13 +154.23,84.1,54.99 +153.28,83.69,54.5 +154.12,84.87,53.71 +153.81,85.84,53.81 +154.48,85.69,53.99 +153.48,85.64,53.93 +155.39,86.0,54.6 +155.3,89.44,55.17 +155.84,89.1,55.02 +155.9,89.08,55.42 +156.55,89.26,55.64 +156.0,89.8,55.97 +156.99,89.93,55.72 +159.88,89.36,54.91 +160.47,88.88,54.51 +159.76,89.09,55.21 +155.98,89.65,55.4 +156.25,90.0,54.57 +156.17,89.94,54.27 +157.1,90.23,54.28 +156.41,90.04,54.16 +157.41,90.28,54.91 +163.05,91.19,54.88 +166.72,90.37,55.17 +169.04,90.54,54.84 +166.89,90.56,55.13 +168.11,90.02,54.87 +172.5,94.25,56.03 +174.25,92.43,56.57 +174.81,92.11,57.22 +176.24,92.66,57.91 +175.88,91.61,57.36 +174.67,91.07,57.04 +173.97,91.37,56.64 +171.34,91.02,56.93 +169.08,90.39,56.7 +171.1,90.97,57.24 +170.15,90.95,56.93 +169.98,92.33,56.81 +173.14,92.45,57.26 +174.96,91.83,57.14 +174.97,92.36,56.8 +174.09,92.88,55.91 +173.07,94.53,56.66 +169.48,94.17,57.51 +171.85,94.11,57.82 +171.05,93.03,57.32 +169.8,93.63,58.76 +169.64,90.66,59.34 +169.01,91.29,59.28 +169.32,92.8,59.14 +169.37,92.52,58.61 +172.67,92.33,59.07 +171.7,93.37,59.27 
+172.27,93.94,59.49 +172.22,92.2,59.7 +173.97,93.15,58.29 +176.42,94.49,58.03 +174.54,93.28,58.01 +174.35,92.1,57.73 +175.01,91.62,57.58 +175.01,90.76,57.3 +170.57,90.67,57.14 +170.6,90.8,57.27 +171.08,90.57,57.81 +169.23,90.34,57.43 +172.26,90.55,57.63 +172.23,89.91,58.71 +173.03,90.66,58.93 +175.0,91.88,59.61 +174.35,92.82,59.31 +174.33,92.12,59.18 +174.29,92.38,59.82 +175.28,93.55,60.0 +177.09,96.57,60.4 +176.19,95.86,60.56 +179.1,97.28,60.66 +179.26,97.5,61.09 +178.46,97.8,61.26 +177.0,97.33,61.41 +177.04,96.76,61.69 +174.22,95.84,60.83 +171.11,97.68,60.55 +171.51,99.0,57.99 +167.96,99.18,57.02 +166.97,99.8,57.19 +167.43,99.46,56.81 +167.78,99.12,56.0 +160.5,103.87,55.77 +156.49,101.06,54.69 +163.03,102.76,55.61 +159.54,102.63,54.46 diff --git a/pytorch/ann_regression.py b/pytorch/ann_regression.py new file mode 100644 index 00000000..65d66250 --- /dev/null +++ b/pytorch/ann_regression.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +"""PyTorch Regression.ipynb + +Automatically generated by Colaboratory. + +Original file is located at + https://colab.research.google.com/drive/1pEjzEmbnu2wXAhIaBS8PSpi-0cWtR6ov +""" + +import torch +import torch.nn as nn +import numpy as np +import matplotlib.pyplot as plt +from mpl_toolkits.mplot3d import Axes3D + +# Make the dataset +N = 1000 +X = np.random.random((N, 2)) * 6 - 3 # uniformly distributed between (-3, +3) +Y = np.cos(2*X[:,0]) + np.cos(3*X[:,1]) + +# Plot it +fig = plt.figure() +ax = fig.add_subplot(111, projection='3d') +ax.scatter(X[:,0], X[:,1], Y) +plt.show() + +# Build the model +model = nn.Sequential( + nn.Linear(2, 128), + nn.ReLU(), + nn.Linear(128, 1) +) + +# Loss and optimizer +criterion = nn.MSELoss() +optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + +# Train the model +def full_gd(model, criterion, optimizer, X_train, y_train, epochs=1000): + # Stuff to store + train_losses = np.zeros(epochs) + + for it in range(epochs): + # zero the parameter gradients + optimizer.zero_grad() + + # Forward pass + outputs = model(X_train) + loss = criterion(outputs, y_train) + + # Backward and optimize + loss.backward() + optimizer.step() + + # Save losses + train_losses[it] = loss.item() + + if (it + 1) % 50 == 0: + print(f'Epoch {it+1}/{epochs}, Train Loss: {loss.item():.4f}') + + return train_losses + +X_train = torch.from_numpy(X.astype(np.float32)) +y_train = torch.from_numpy(Y.astype(np.float32).reshape(-1, 1)) +train_losses = full_gd(model, criterion, optimizer, X_train, y_train) + +plt.plot(train_losses) +plt.show() + + +# Plot the prediction surface +fig = plt.figure() +ax = fig.add_subplot(111, projection='3d') +ax.scatter(X[:,0], X[:,1], Y) + +# surface plot +with torch.no_grad(): + line = np.linspace(-3, 3, 50) + xx, yy = np.meshgrid(line, line) + Xgrid = np.vstack((xx.flatten(), yy.flatten())).T + Xgrid_torch = torch.from_numpy(Xgrid.astype(np.float32)) + Yhat = model(Xgrid_torch).numpy().flatten() + ax.plot_trisurf(Xgrid[:,0], Xgrid[:,1], Yhat, linewidth=0.2, antialiased=True) + plt.show() + +# Can it extrapolate? 
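+# Note: the model was only trained on inputs in (-3, +3). A ReLU network is
+# piecewise-linear, so outside that range it tends to extrapolate roughly
+# linearly and will not recover the periodic cosine pattern -- compare the
+# surface below with the one fitted on the training range above.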
+# Plot the prediction surface +fig = plt.figure() +ax = fig.add_subplot(111, projection='3d') +ax.scatter(X[:,0], X[:,1], Y) + +# surface plot +with torch.no_grad(): + line = np.linspace(-5, 5, 50) + xx, yy = np.meshgrid(line, line) + Xgrid = np.vstack((xx.flatten(), yy.flatten())).T + Xgrid_torch = torch.from_numpy(Xgrid.astype(np.float32)) + Yhat = model(Xgrid_torch).numpy().flatten() + ax.plot_trisurf(Xgrid[:,0], Xgrid[:,1], Yhat, linewidth=0.2, antialiased=True) + plt.show() \ No newline at end of file diff --git a/pytorch/exercises.txt b/pytorch/exercises.txt new file mode 100644 index 00000000..6fdee299 --- /dev/null +++ b/pytorch/exercises.txt @@ -0,0 +1,36 @@ +Logistic Regression +https://www.kaggle.com/uciml/pima-indians-diabetes-database +https://lazyprogrammer.me/course_files/exercises/diabetes.csv + +Linear Regression +https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html +https://lazyprogrammer.me/course_files/exercises/boston.txt + +ANN +https://archive.ics.uci.edu/ml/datasets/ecoli (orig) +https://www.kaggle.com/elikplim/ecoli-data-set (alt) +https://lazyprogrammer.me/course_files/exercises/ecoli.csv + +CNN +https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge +https://archive.org/download/fer2013_202311/fer2013.csv + +RNN +Find your own stock price dataset! + +NLP +https://www.kaggle.com/crowdflower/twitter-airline-sentiment +https://lazyprogrammer.me/course_files/exercises/AirlineSentimentTweets.csv + +Recommender Systems +http://www2.informatik.uni-freiburg.de/~cziegler/BX/ +http://lazyprogrammer.me/course_files/exercises/BX-CSV-Dump.zip + +Transfer Learning +https://www.kaggle.com/c/dogs-vs-cats + +GAN +https://www.kaggle.com/c/dogs-vs-cats + +DeepRL +Find your own stock price dataset! 
\ No newline at end of file diff --git a/pytorch/extra_reading.txt b/pytorch/extra_reading.txt new file mode 100644 index 00000000..7fccf01f --- /dev/null +++ b/pytorch/extra_reading.txt @@ -0,0 +1,30 @@ +Gradient Descent: Convergence Analysis +http://www.stat.cmu.edu/~ryantibs/convexopt-F13/scribes/lec6.pdf + +Deep learning improved by biological activation functions +https://arxiv.org/pdf/1804.11237.pdf + +Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift +Sergey Ioffe, Christian Szegedy +https://arxiv.org/abs/1502.03167 + +Dropout: A Simple Way to Prevent Neural Networks from Overfitting +https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf + +Implementing Dropout +https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow/ + +Convolution arithmetic tutorial +https://theano-pymc.readthedocs.io/en/latest/tutorial/conv_arithmetic.html + +On the Practical Computational Power of Finite Precision RNNs for Language Recognition +https://arxiv.org/abs/1805.04908 + +Massive Exploration of Neural Machine Translation Architectures +https://arxiv.org/abs/1703.03906 + +Practical Deep Reinforcement Learning Approach for Stock Trading +https://arxiv.org/abs/1811.07522 + +Inceptionism: Going Deeper into Neural Networks +https://ai.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html \ No newline at end of file diff --git a/pytorch/plot_rl_rewards.py b/pytorch/plot_rl_rewards.py new file mode 100644 index 00000000..3eb8e171 --- /dev/null +++ b/pytorch/plot_rl_rewards.py @@ -0,0 +1,22 @@ +import matplotlib.pyplot as plt +import numpy as np +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') +args = parser.parse_args() + +a = np.load(f'rl_trader_rewards/{args.mode}.npy') + +print(f"average reward: {a.mean():.2f}, min: {a.min():.2f}, max: {a.max():.2f}") + +if args.mode == 'train': + # show the training progress + plt.plot(a) +else: + # test - show a histogram of rewards + plt.hist(a, bins=20) + +plt.title(args.mode) +plt.show() \ No newline at end of file diff --git a/pytorch/rl_trader.py b/pytorch/rl_trader.py new file mode 100644 index 00000000..5738c9ac --- /dev/null +++ b/pytorch/rl_trader.py @@ -0,0 +1,441 @@ +import numpy as np +import pandas as pd + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from datetime import datetime +import itertools +import argparse +import re +import os +import pickle + +from sklearn.preprocessing import StandardScaler + + +# Let's use AAPL (Apple), MSI (Motorola), SBUX (Starbucks) +def get_data(): + # returns a T x 3 list of stock prices + # each row is a different stock + # 0 = AAPL + # 1 = MSI + # 2 = SBUX + df = pd.read_csv('aapl_msi_sbux.csv') + return df.values + + + +### The experience replay memory ### +class ReplayBuffer: + def __init__(self, obs_dim, act_dim, size): + self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.acts_buf = np.zeros(size, dtype=np.uint8) + self.rews_buf = np.zeros(size, dtype=np.float32) + self.done_buf = np.zeros(size, dtype=np.uint8) + self.ptr, self.size, self.max_size = 0, 0, size + + def store(self, obs, act, rew, next_obs, done): + self.obs1_buf[self.ptr] = obs + self.obs2_buf[self.ptr] = next_obs + self.acts_buf[self.ptr] = act + self.rews_buf[self.ptr] = rew + self.done_buf[self.ptr] = done + self.ptr = (self.ptr+1) % self.max_size + self.size = 
min(self.size+1, self.max_size) + + def sample_batch(self, batch_size=32): + idxs = np.random.randint(0, self.size, size=batch_size) + return dict(s=self.obs1_buf[idxs], + s2=self.obs2_buf[idxs], + a=self.acts_buf[idxs], + r=self.rews_buf[idxs], + d=self.done_buf[idxs]) + + + + + +def get_scaler(env): + # return scikit-learn scaler object to scale the states + # Note: you could also populate the replay buffer here + + states = [] + for _ in range(env.n_step): + action = np.random.choice(env.action_space) + state, reward, done, info = env.step(action) + states.append(state) + if done: + break + + scaler = StandardScaler() + scaler.fit(states) + return scaler + + + + +def maybe_make_dir(directory): + if not os.path.exists(directory): + os.makedirs(directory) + + + + +class MLP(nn.Module): + def __init__(self, n_inputs, n_action, n_hidden_layers=1, hidden_dim=32): + super(MLP, self).__init__() + + M = n_inputs + self.layers = [] + for _ in range(n_hidden_layers): + layer = nn.Linear(M, hidden_dim) + M = hidden_dim + self.layers.append(layer) + self.layers.append(nn.ReLU()) + + # final layer + self.layers.append(nn.Linear(M, n_action)) + self.layers = nn.Sequential(*self.layers) + + def forward(self, X): + return self.layers(X) + + def save_weights(self, path): + torch.save(self.state_dict(), path) + + def load_weights(self, path): + self.load_state_dict(torch.load(path)) + + + +def predict(model, np_states): + with torch.no_grad(): + inputs = torch.from_numpy(np_states.astype(np.float32)) + output = model(inputs) + # print("output:", output) + return output.numpy() + + + +def train_one_step(model, criterion, optimizer, inputs, targets): + # convert to tensors + inputs = torch.from_numpy(inputs.astype(np.float32)) + targets = torch.from_numpy(targets.astype(np.float32)) + + # zero the parameter gradients + optimizer.zero_grad() + + # Forward pass + outputs = model(inputs) + loss = criterion(outputs, targets) + + # Backward and optimize + loss.backward() + optimizer.step() + + + +class MultiStockEnv: + """ + A 3-stock trading environment. + State: vector of size 7 (n_stock * 2 + 1) + - # shares of stock 1 owned + - # shares of stock 2 owned + - # shares of stock 3 owned + - price of stock 1 (using daily close price) + - price of stock 2 + - price of stock 3 + - cash owned (can be used to purchase more stocks) + Action: categorical variable with 27 (3^3) possibilities + - for each stock, you can: + - 0 = sell + - 1 = hold + - 2 = buy + """ + def __init__(self, data, initial_investment=20000): + # data + self.stock_price_history = data + self.n_step, self.n_stock = self.stock_price_history.shape + + # instance attributes + self.initial_investment = initial_investment + self.cur_step = None + self.stock_owned = None + self.stock_price = None + self.cash_in_hand = None + + self.action_space = np.arange(3**self.n_stock) + + # action permutations + # returns a nested list with elements like: + # [0,0,0] + # [0,0,1] + # [0,0,2] + # [0,1,0] + # [0,1,1] + # etc. 
+ # 0 = sell + # 1 = hold + # 2 = buy + self.action_list = list(map(list, itertools.product([0, 1, 2], repeat=self.n_stock))) + + # calculate size of state + self.state_dim = self.n_stock * 2 + 1 + + self.reset() + + + def reset(self): + self.cur_step = 0 + self.stock_owned = np.zeros(self.n_stock) + self.stock_price = self.stock_price_history[self.cur_step] + self.cash_in_hand = self.initial_investment + return self._get_obs() + + + def step(self, action): + assert action in self.action_space + + # get current value before performing the action + prev_val = self._get_val() + + # update price, i.e. go to the next day + self.cur_step += 1 + self.stock_price = self.stock_price_history[self.cur_step] + + # perform the trade + self._trade(action) + + # get the new value after taking the action + cur_val = self._get_val() + + # reward is the increase in porfolio value + reward = cur_val - prev_val + + # done if we have run out of data + done = self.cur_step == self.n_step - 1 + + # store the current value of the portfolio here + info = {'cur_val': cur_val} + + # conform to the Gym API + return self._get_obs(), reward, done, info + + + def _get_obs(self): + obs = np.empty(self.state_dim) + obs[:self.n_stock] = self.stock_owned + obs[self.n_stock:2*self.n_stock] = self.stock_price + obs[-1] = self.cash_in_hand + return obs + + + + def _get_val(self): + return self.stock_owned.dot(self.stock_price) + self.cash_in_hand + + + def _trade(self, action): + # index the action we want to perform + # 0 = sell + # 1 = hold + # 2 = buy + # e.g. [2,1,0] means: + # buy first stock + # hold second stock + # sell third stock + action_vec = self.action_list[action] + + # determine which stocks to buy or sell + sell_index = [] # stores index of stocks we want to sell + buy_index = [] # stores index of stocks we want to buy + for i, a in enumerate(action_vec): + if a == 0: + sell_index.append(i) + elif a == 2: + buy_index.append(i) + + # sell any stocks we want to sell + # then buy any stocks we want to buy + if sell_index: + # NOTE: to simplify the problem, when we sell, we will sell ALL shares of that stock + for i in sell_index: + self.cash_in_hand += self.stock_price[i] * self.stock_owned[i] + self.stock_owned[i] = 0 + if buy_index: + # NOTE: when buying, we will loop through each stock we want to buy, + # and buy one share at a time until we run out of cash + can_buy = True + while can_buy: + for i in buy_index: + if self.cash_in_hand > self.stock_price[i]: + self.stock_owned[i] += 1 # buy one share + self.cash_in_hand -= self.stock_price[i] + else: + can_buy = False + + + + + +class DQNAgent(object): + def __init__(self, state_size, action_size): + self.state_size = state_size + self.action_size = action_size + self.memory = ReplayBuffer(state_size, action_size, size=500) + self.gamma = 0.95 # discount rate + self.epsilon = 1.0 # exploration rate + self.epsilon_min = 0.01 + self.epsilon_decay = 0.995 + self.model = MLP(state_size, action_size) + + # Loss and optimizer + self.criterion = nn.MSELoss() + self.optimizer = torch.optim.Adam(self.model.parameters()) + + + def update_replay_memory(self, state, action, reward, next_state, done): + self.memory.store(state, action, reward, next_state, done) + + + def act(self, state): + if np.random.rand() <= self.epsilon: + return np.random.choice(self.action_size) + act_values = predict(self.model, state) + return np.argmax(act_values[0]) # returns action + + + def replay(self, batch_size=32): + # first check if replay buffer contains enough data + if 
self.memory.size < batch_size: + return + + # sample a batch of data from the replay memory + minibatch = self.memory.sample_batch(batch_size) + states = minibatch['s'] + actions = minibatch['a'] + rewards = minibatch['r'] + next_states = minibatch['s2'] + done = minibatch['d'] + + # Calculate the target: Q(s',a) + target = rewards + (1 - done) * self.gamma * np.amax(predict(self.model, next_states), axis=1) + + # With the PyTorch API, it is simplest to have the target be the + # same shape as the predictions. + # However, we only need to update the network for the actions + # which were actually taken. + # We can accomplish this by setting the target to be equal to + # the prediction for all values. + # Then, only change the targets for the actions taken. + # Q(s,a) + target_full = predict(self.model, states) + target_full[np.arange(batch_size), actions] = target + + # Run one training step + train_one_step(self.model, self.criterion, self.optimizer, states, target_full) + + if self.epsilon > self.epsilon_min: + self.epsilon *= self.epsilon_decay + + + def load(self, name): + self.model.load_weights(name) + + + def save(self, name): + self.model.save_weights(name) + + +def play_one_episode(agent, env, is_train): + # note: after transforming states are already 1xD + state = env.reset() + state = scaler.transform([state]) + done = False + + while not done: + action = agent.act(state) + next_state, reward, done, info = env.step(action) + next_state = scaler.transform([next_state]) + if is_train == 'train': + agent.update_replay_memory(state, action, reward, next_state, done) + agent.replay(batch_size) + state = next_state + + return info['cur_val'] + + + +if __name__ == '__main__': + + # config + models_folder = 'rl_trader_models' + rewards_folder = 'rl_trader_rewards' + num_episodes = 2000 + batch_size = 32 + initial_investment = 20000 + + + parser = argparse.ArgumentParser() + parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') + args = parser.parse_args() + + maybe_make_dir(models_folder) + maybe_make_dir(rewards_folder) + + data = get_data() + n_timesteps, n_stocks = data.shape + + n_train = n_timesteps // 2 + + train_data = data[:n_train] + test_data = data[n_train:] + + env = MultiStockEnv(train_data, initial_investment) + state_size = env.state_dim + action_size = len(env.action_space) + agent = DQNAgent(state_size, action_size) + scaler = get_scaler(env) + + # store the final value of the portfolio (end of episode) + portfolio_value = [] + + if args.mode == 'test': + # then load the previous scaler + with open(f'{models_folder}/scaler.pkl', 'rb') as f: + scaler = pickle.load(f) + + # remake the env with test data + env = MultiStockEnv(test_data, initial_investment) + + # make sure epsilon is not 1! 
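+    # (with epsilon = 1, every action would be chosen at random and the
+    # learned Q-network would be ignored)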
+ # no need to run multiple episodes if epsilon = 0, it's deterministic + agent.epsilon = 0.01 + + # load trained weights + agent.load(f'{models_folder}/dqn.ckpt') + + # play the game num_episodes times + for e in range(num_episodes): + t0 = datetime.now() + val = play_one_episode(agent, env, args.mode) + dt = datetime.now() - t0 + print(f"episode: {e + 1}/{num_episodes}, episode end value: {val:.2f}, duration: {dt}") + portfolio_value.append(val) # append episode end portfolio value + + # save the weights when we are done + if args.mode == 'train': + # save the DQN + agent.save(f'{models_folder}/dqn.ckpt') + + # save the scaler + with open(f'{models_folder}/scaler.pkl', 'wb') as f: + pickle.dump(scaler, f) + + + # save portfolio value for each episode + np.save(f'{rewards_folder}/{args.mode}.npy', portfolio_value) diff --git a/recommenders/autorec.py b/recommenders/autorec.py index 02ff05b9..fa0bd415 100644 --- a/recommenders/autorec.py +++ b/recommenders/autorec.py @@ -102,7 +102,7 @@ def test_generator(A, M, A_test, M_test): ) -r = model.fit_generator( +r = model.fit( generator(A, mask), validation_data=test_generator(A_copy, mask_copy, A_test_copy, mask_test_copy), epochs=epochs, diff --git a/recommenders/extra_reading.txt b/recommenders/extra_reading.txt index 21d09a4b..510410cd 100644 --- a/recommenders/extra_reading.txt +++ b/recommenders/extra_reading.txt @@ -56,4 +56,7 @@ AutoRec: Autoencoders Meet Collaborative Filtering http://users.cecs.anu.edu.au/~u5098633/papers/www15.pdf Collaborative Filtering for Implicit Feedback Datasets -http://yifanhu.net/PUB/cf.pdf \ No newline at end of file +http://yifanhu.net/PUB/cf.pdf + +Neural Collaborative Filtering +https://arxiv.org/abs/1708.05031 \ No newline at end of file diff --git a/recommenders/rbm_tf_k.py b/recommenders/rbm_tf_k.py index 6043085e..836a5595 100644 --- a/recommenders/rbm_tf_k.py +++ b/recommenders/rbm_tf_k.py @@ -14,6 +14,9 @@ from scipy.sparse import lil_matrix, csr_matrix, save_npz, load_npz from datetime import datetime +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + # is it possible to one-hot encode the data prior to feeding it # into the neural network, so that we don't have to do it on the fly? 
@@ -84,13 +87,13 @@ def __init__(self, D, M, K): def build(self, D, M, K): # params - self.W = tf.Variable(tf.random_normal(shape=(D, K, M)) * np.sqrt(2.0 / M)) + self.W = tf.Variable(tf.random.normal(shape=(D, K, M)) * np.sqrt(2.0 / M)) self.c = tf.Variable(np.zeros(M).astype(np.float32)) self.b = tf.Variable(np.zeros((D, K)).astype(np.float32)) # data - self.X_in = tf.placeholder(tf.float32, shape=(None, D, K)) - self.mask = tf.placeholder(tf.float32, shape=(None, D, K)) + self.X_in = tf.compat.v1.placeholder(tf.float32, shape=(None, D, K)) + self.mask = tf.compat.v1.placeholder(tf.float32, shape=(None, D, K)) # conditional probabilities # NOTE: tf.contrib.distributions.Bernoulli API has changed in Tensorflow v1.2 @@ -99,21 +102,21 @@ def build(self, D, M, K): self.p_h_given_v = p_h_given_v # save for later # draw a sample from p(h | v) - r = tf.random_uniform(shape=tf.shape(p_h_given_v)) - H = tf.to_float(r < p_h_given_v) + r = tf.random.uniform(shape=tf.shape(input=p_h_given_v)) + H = tf.cast(r < p_h_given_v, dtype=tf.float32) # draw a sample from p(v | h) # note: we don't have to actually do the softmax logits = dot2(H, self.W) + self.b - cdist = tf.distributions.Categorical(logits=logits) + cdist = tf.compat.v1.distributions.Categorical(logits=logits) X_sample = cdist.sample() # shape is (N, D) X_sample = tf.one_hot(X_sample, depth=K) # turn it into (N, D, K) X_sample = X_sample * self.mask # missing ratings shouldn't contribute to objective # build the objective - objective = tf.reduce_mean(self.free_energy(self.X_in)) - tf.reduce_mean(self.free_energy(X_sample)) - self.train_op = tf.train.AdamOptimizer(1e-2).minimize(objective) + objective = tf.reduce_mean(input_tensor=self.free_energy(self.X_in)) - tf.reduce_mean(input_tensor=self.free_energy(X_sample)) + self.train_op = tf.compat.v1.train.AdamOptimizer(1e-2).minimize(objective) # self.train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(objective) # build the cost @@ -121,8 +124,8 @@ def build(self, D, M, K): # just to observe what happens during training logits = self.forward_logits(self.X_in) self.cost = tf.reduce_mean( - tf.nn.softmax_cross_entropy_with_logits( - labels=self.X_in, + input_tensor=tf.nn.softmax_cross_entropy_with_logits( + labels=tf.stop_gradient(self.X_in), logits=logits, ) ) @@ -130,8 +133,8 @@ def build(self, D, M, K): # to get the output self.output_visible = self.forward_output(self.X_in) - initop = tf.global_variables_initializer() - self.session = tf.Session() + initop = tf.compat.v1.global_variables_initializer() + self.session = tf.compat.v1.Session() self.session.run(initop) def fit(self, X, mask, X_test, mask_test, epochs=10, batch_sz=256, show_fig=True): @@ -202,10 +205,10 @@ def fit(self, X, mask, X_test, mask_test, epochs=10, batch_sz=256, show_fig=True plt.show() def free_energy(self, V): - first_term = -tf.reduce_sum(dot1(V, self.b)) + first_term = -tf.reduce_sum(input_tensor=dot1(V, self.b)) second_term = -tf.reduce_sum( # tf.log(1 + tf.exp(tf.matmul(V, self.W) + self.c)), - tf.nn.softplus(dot1(V, self.W) + self.c), + input_tensor=tf.nn.softplus(dot1(V, self.W) + self.c), axis=1 ) return first_term + second_term diff --git a/recommenders/rbm_tf_k_faster.py b/recommenders/rbm_tf_k_faster.py index 75100ba0..9a1a242a 100644 --- a/recommenders/rbm_tf_k_faster.py +++ b/recommenders/rbm_tf_k_faster.py @@ -14,6 +14,9 @@ from scipy.sparse import lil_matrix, csr_matrix, save_npz, load_npz from datetime import datetime +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + 
def dot1(V, W): # V is N x D x K (batch of visible units) @@ -38,12 +41,12 @@ def __init__(self, D, M, K): def build(self, D, M, K): # params - self.W = tf.Variable(tf.random_normal(shape=(D, K, M)) * np.sqrt(2.0 / M)) + self.W = tf.Variable(tf.random.normal(shape=(D, K, M)) * np.sqrt(2.0 / M)) self.c = tf.Variable(np.zeros(M).astype(np.float32)) self.b = tf.Variable(np.zeros((D, K)).astype(np.float32)) # data - self.X_in = tf.placeholder(tf.float32, shape=(None, D)) + self.X_in = tf.compat.v1.placeholder(tf.float32, shape=(None, D)) # one hot encode X # first, make each rating an int @@ -57,13 +60,13 @@ def build(self, D, M, K): self.p_h_given_v = p_h_given_v # save for later # draw a sample from p(h | v) - r = tf.random_uniform(shape=tf.shape(p_h_given_v)) - H = tf.to_float(r < p_h_given_v) + r = tf.random.uniform(shape=tf.shape(input=p_h_given_v)) + H = tf.cast(r < p_h_given_v, dtype=tf.float32) # draw a sample from p(v | h) # note: we don't have to actually do the softmax logits = dot2(H, self.W) + self.b - cdist = tf.distributions.Categorical(logits=logits) + cdist = tf.compat.v1.distributions.Categorical(logits=logits) X_sample = cdist.sample() # shape is (N, D) X_sample = tf.one_hot(X_sample, depth=K) # turn it into (N, D, K) @@ -74,8 +77,8 @@ def build(self, D, M, K): # build the objective - objective = tf.reduce_mean(self.free_energy(X)) - tf.reduce_mean(self.free_energy(X_sample)) - self.train_op = tf.train.AdamOptimizer(1e-2).minimize(objective) + objective = tf.reduce_mean(input_tensor=self.free_energy(X)) - tf.reduce_mean(input_tensor=self.free_energy(X_sample)) + self.train_op = tf.compat.v1.train.AdamOptimizer(1e-2).minimize(objective) # self.train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(objective) # build the cost @@ -83,8 +86,8 @@ def build(self, D, M, K): # just to observe what happens during training logits = self.forward_logits(X) self.cost = tf.reduce_mean( - tf.nn.softmax_cross_entropy_with_logits( - labels=X, + input_tensor=tf.nn.softmax_cross_entropy_with_logits( + labels=tf.stop_gradient(X), logits=logits, ) ) @@ -98,17 +101,17 @@ def build(self, D, M, K): self.pred = tf.tensordot(self.output_visible, self.one_to_ten, axes=[[2], [0]]) mask = tf.cast(self.X_in > 0, tf.float32) se = mask * (self.X_in - self.pred) * (self.X_in - self.pred) - self.sse = tf.reduce_sum(se) + self.sse = tf.reduce_sum(input_tensor=se) # test SSE - self.X_test = tf.placeholder(tf.float32, shape=(None, D)) + self.X_test = tf.compat.v1.placeholder(tf.float32, shape=(None, D)) mask = tf.cast(self.X_test > 0, tf.float32) tse = mask * (self.X_test - self.pred) * (self.X_test - self.pred) - self.tsse = tf.reduce_sum(tse) + self.tsse = tf.reduce_sum(input_tensor=tse) - initop = tf.global_variables_initializer() - self.session = tf.Session() + initop = tf.compat.v1.global_variables_initializer() + self.session = tf.compat.v1.Session() self.session.run(initop) def fit(self, X, X_test, epochs=10, batch_sz=256, show_fig=True): @@ -168,10 +171,10 @@ def fit(self, X, X_test, epochs=10, batch_sz=256, show_fig=True): plt.show() def free_energy(self, V): - first_term = -tf.reduce_sum(dot1(V, self.b)) + first_term = -tf.reduce_sum(input_tensor=dot1(V, self.b)) second_term = -tf.reduce_sum( # tf.log(1 + tf.exp(tf.matmul(V, self.W) + self.c)), - tf.nn.softplus(dot1(V, self.W) + self.c), + input_tensor=tf.nn.softplus(dot1(V, self.W) + self.c), axis=1 ) return first_term + second_term diff --git a/recommenders/tfidf.py b/recommenders/tfidf.py new file mode 100644 index 00000000..a6078ec3 --- 
/dev/null +++ b/recommenders/tfidf.py @@ -0,0 +1,72 @@ +import pandas as pd +import json + +from sklearn.feature_extraction.text import TfidfVectorizer +from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances + + +# get the data from: https://www.kaggle.com/tmdb/tmdb-movie-metadata +# load in the data +df = pd.read_csv('../large_files/tmdb_5000_movies.csv') + + +# convert the relevant data for each movie into a single string +# to be ingested by TfidfVectorizer +def genres_and_keywords_to_string(row): + genres = json.loads(row['genres']) + genres = ' '.join(''.join(j['name'].split()) for j in genres) + + keywords = json.loads(row['keywords']) + keywords = ' '.join(''.join(j['name'].split()) for j in keywords) + return "%s %s" % (genres, keywords) + + +# create a new string representation of each movie +df['string'] = df.apply(genres_and_keywords_to_string, axis=1) + + +# create a tf-idf vectorizer object +# remove stopwords automatically +tfidf = TfidfVectorizer(max_features=2000) + +# create a data matrix from the overviews +X = tfidf.fit_transform(df['string']) + +# check the shape of X +print("X.shape:", X.shape) + +# generate a mapping from movie title -> index (in df) +movie2idx = pd.Series(df.index, index=df['title']) + +# create a function that generates recommendations +def recommend(title): + # get the row in the dataframe for this movie + idx = movie2idx[title] + if type(idx) == pd.Series: + idx = idx.iloc[0] + # print("idx:", idx) + + # calculate the pairwise similarities for this movie + query = X[idx] + scores = cosine_similarity(query, X) + + # currently the array is 1 x N, make it just a 1-D array + scores = scores.flatten() + + # get the indexes of the highest scoring movies + # get the first K recommendations + # don't return itself! + recommended_idx = (-scores).argsort()[1:6] + + # return the titles of the recommendations + return df['title'].iloc[recommended_idx] + + +print("\nRecommendations for 'Scream 3':") +print(recommend('Scream 3')) + +print("\nRecommendations for 'Mortal Kombat':") +print(recommend('Mortal Kombat')) + +print("\nRecommendations for 'Runaway Bride':") +print(recommend('Runaway Bride')) diff --git a/rl/approx_control.py b/rl/approx_control.py new file mode 100644 index 00000000..ba19e58f --- /dev/null +++ b/rl/approx_control.py @@ -0,0 +1,162 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from grid_world import standard_grid, negative_grid +from iterative_policy_evaluation import print_values, print_policy +from sklearn.kernel_approximation import Nystroem, RBFSampler + +GAMMA = 0.9 +ALPHA = 0.1 +ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') +ACTION2INT = {a: i for i, a in enumerate(ALL_POSSIBLE_ACTIONS)} +INT2ONEHOT = np.eye(len(ALL_POSSIBLE_ACTIONS)) + + +def epsilon_greedy(model, s, eps=0.1): + # we'll use epsilon-soft to ensure all states are visited + # what happens if you don't do this? i.e. 
eps=0 + p = np.random.random() + if p < (1 - eps): + values = model.predict_all_actions(s) + return ALL_POSSIBLE_ACTIONS[np.argmax(values)] + else: + return np.random.choice(ALL_POSSIBLE_ACTIONS) + + +def one_hot(k): + return INT2ONEHOT[k] + + +def merge_state_action(s, a): + ai = one_hot(ACTION2INT[a]) + return np.concatenate((s, ai)) + + +def gather_samples(grid, n_episodes=1000): + samples = [] + for _ in range(n_episodes): + s = grid.reset() + while not grid.game_over(): + a = np.random.choice(ALL_POSSIBLE_ACTIONS) + sa = merge_state_action(s, a) + samples.append(sa) + + r = grid.move(a) + s = grid.current_state() + return samples + + +class Model: + def __init__(self, grid): + # fit the featurizer to data + samples = gather_samples(grid) + # self.featurizer = Nystroem() + self.featurizer = RBFSampler() + self.featurizer.fit(samples) + dims = self.featurizer.n_components + + # initialize linear model weights + self.w = np.zeros(dims) + + def predict(self, s, a): + sa = merge_state_action(s, a) + x = self.featurizer.transform([sa])[0] + return x @ self.w + + def predict_all_actions(self, s): + return [self.predict(s, a) for a in ALL_POSSIBLE_ACTIONS] + + def grad(self, s, a): + sa = merge_state_action(s, a) + x = self.featurizer.transform([sa])[0] + return x + + +if __name__ == '__main__': + # use the standard grid again (0 for every step) so that we can compare + # to iterative policy evaluation + # grid = standard_grid() + grid = negative_grid(step_cost=-0.1) + + # print rewards + print("rewards:") + print_values(grid.rewards, grid) + + model = Model(grid) + reward_per_episode = [] + state_visit_count = {} + + # repeat until convergence + n_episodes = 20000 + for it in range(n_episodes): + if (it + 1) % 100 == 0: + print(it + 1) + + s = grid.reset() + state_visit_count[s] = state_visit_count.get(s, 0) + 1 + episode_reward = 0 + while not grid.game_over(): + a = epsilon_greedy(model, s) + r = grid.move(a) + s2 = grid.current_state() + state_visit_count[s2] = state_visit_count.get(s2, 0) + 1 + + # get the target + if grid.game_over(): + target = r + else: + values = model.predict_all_actions(s2) + target = r + GAMMA * np.max(values) + + # update the model + g = model.grad(s, a) + err = target - model.predict(s, a) + model.w += ALPHA * err * g + + # accumulate reward + episode_reward += r + + # update state + s = s2 + + reward_per_episode.append(episode_reward) + + plt.plot(reward_per_episode) + plt.title("Reward per episode") + plt.show() + + # obtain V* and pi* + V = {} + greedy_policy = {} + states = grid.all_states() + for s in states: + if s in grid.actions: + values = model.predict_all_actions(s) + V[s] = np.max(values) + greedy_policy[s] = ALL_POSSIBLE_ACTIONS[np.argmax(values)] + else: + # terminal state or state we can't otherwise get to + V[s] = 0 + + print("values:") + print_values(V, grid) + print("policy:") + print_policy(greedy_policy, grid) + + + print("state_visit_count:") + state_sample_count_arr = np.zeros((grid.rows, grid.cols)) + for i in range(grid.rows): + for j in range(grid.cols): + if (i, j) in state_visit_count: + state_sample_count_arr[i,j] = state_visit_count[(i, j)] + df = pd.DataFrame(state_sample_count_arr) + print(df) diff --git a/rl/approx_mc_prediction.py b/rl/approx_mc_prediction.py deleted file mode 100644 index 91d649bb..00000000 --- a/rl/approx_mc_prediction.py +++ /dev/null @@ -1,106 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# 
https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -import matplotlib.pyplot as plt -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy - -# NOTE: this is only policy evaluation, not optimization - -# we'll try to obtain the same result as our other MC script -from monte_carlo_random import random_action, play_game, SMALL_ENOUGH, GAMMA, ALL_POSSIBLE_ACTIONS - -LEARNING_RATE = 0.001 - -if __name__ == '__main__': - # use the standard grid again (0 for every step) so that we can compare - # to iterative policy evaluation - grid = standard_grid() - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # state -> action - # found by policy_iteration_random on standard_grid - # MC method won't get exactly this, but should be close - # values: - # --------------------------- - # 0.43| 0.56| 0.72| 0.00| - # --------------------------- - # 0.33| 0.00| 0.21| 0.00| - # --------------------------- - # 0.25| 0.18| 0.11| -0.17| - # policy: - # --------------------------- - # R | R | R | | - # --------------------------- - # U | | U | | - # --------------------------- - # U | L | U | L | - policy = { - (2, 0): 'U', - (1, 0): 'U', - (0, 0): 'R', - (0, 1): 'R', - (0, 2): 'R', - (1, 2): 'U', - (2, 1): 'L', - (2, 2): 'U', - (2, 3): 'L', - } - - # initialize theta - # our model is V_hat = theta.dot(x) - # where x = [row, col, row*col, 1] - 1 for bias term - theta = np.random.randn(4) / 2 - def s2x(s): - return np.array([s[0] - 1, s[1] - 1.5, s[0]*s[1] - 3, 1]) - - # repeat until convergence - deltas = [] - t = 1.0 - for it in range(20000): - if it % 100 == 0: - t += 0.01 - alpha = LEARNING_RATE/t - # generate an episode using pi - biggest_change = 0 - states_and_returns = play_game(grid, policy) - seen_states = set() - for s, G in states_and_returns: - # check if we have already seen s - # called "first-visit" MC policy evaluation - if s not in seen_states: - old_theta = theta.copy() - x = s2x(s) - V_hat = theta.dot(x) - # grad(V_hat) wrt theta = x - theta += alpha*(G - V_hat)*x - biggest_change = max(biggest_change, np.abs(old_theta - theta).sum()) - seen_states.add(s) - deltas.append(biggest_change) - - plt.plot(deltas) - plt.show() - - # obtain predicted values - V = {} - states = grid.all_states() - for s in states: - if s in grid.actions: - V[s] = theta.dot(s2x(s)) - else: - # terminal state or state we can't otherwise get to - V[s] = 0 - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) diff --git a/rl/approx_prediction.py b/rl/approx_prediction.py new file mode 100644 index 00000000..4e75d9fc --- /dev/null +++ b/rl/approx_prediction.py @@ -0,0 +1,144 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +import matplotlib.pyplot as plt +from grid_world import standard_grid, negative_grid +from iterative_policy_evaluation import print_values, print_policy +from sklearn.kernel_approximation import Nystroem, RBFSampler + +GAMMA = 0.9 +ALPHA = 0.01 
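+# GAMMA is the discount factor and ALPHA the step size used for the
+# semi-gradient TD(0) updates in the training loop below.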
+ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') + + +def epsilon_greedy(greedy, s, eps=0.1): + # we'll use epsilon-soft to ensure all states are visited + # what happens if you don't do this? i.e. eps=0 + p = np.random.random() + if p < (1 - eps): + return greedy[s] + else: + return np.random.choice(ALL_POSSIBLE_ACTIONS) + + +def gather_samples(grid, n_episodes=10000): + samples = [] + for _ in range(n_episodes): + s = grid.reset() + samples.append(s) + while not grid.game_over(): + a = np.random.choice(ALL_POSSIBLE_ACTIONS) + r = grid.move(a) + s = grid.current_state() + samples.append(s) + return samples + + +class Model: + def __init__(self, grid): + # fit the featurizer to data + samples = gather_samples(grid) + # self.featurizer = Nystroem() + self.featurizer = RBFSampler() + self.featurizer.fit(samples) + dims = self.featurizer.n_components + + # initialize linear model weights + self.w = np.zeros(dims) + + def predict(self, s): + x = self.featurizer.transform([s])[0] + return x @ self.w + + def grad(self, s): + x = self.featurizer.transform([s])[0] + return x + + +if __name__ == '__main__': + # use the standard grid again (0 for every step) so that we can compare + # to iterative policy evaluation + grid = standard_grid() + + # print rewards + print("rewards:") + print_values(grid.rewards, grid) + + # state -> action + greedy_policy = { + (2, 0): 'U', + (1, 0): 'U', + (0, 0): 'R', + (0, 1): 'R', + (0, 2): 'R', + (1, 2): 'R', + (2, 1): 'R', + (2, 2): 'R', + (2, 3): 'U', + } + + model = Model(grid) + mse_per_episode = [] + + # repeat until convergence + n_episodes = 10000 + for it in range(n_episodes): + if (it + 1) % 100 == 0: + print(it + 1) + + s = grid.reset() + Vs = model.predict(s) + n_steps = 0 + episode_err = 0 + while not grid.game_over(): + a = epsilon_greedy(greedy_policy, s) + r = grid.move(a) + s2 = grid.current_state() + + # get the target + if grid.is_terminal(s2): + target = r + else: + Vs2 = model.predict(s2) + target = r + GAMMA * Vs2 + + # update the model + g = model.grad(s) + err = target - Vs + model.w += ALPHA * err * g + + # accumulate error + n_steps += 1 + episode_err += err*err + + # update state + s = s2 + Vs = Vs2 + + mse = episode_err / n_steps + mse_per_episode.append(mse) + + plt.plot(mse_per_episode) + plt.title("MSE per episode") + plt.show() + + # obtain predicted values + V = {} + states = grid.all_states() + for s in states: + if s in grid.actions: + V[s] = model.predict(s) + else: + # terminal state or state we can't otherwise get to + V[s] = 0 + + print("values:") + print_values(V, grid) + print("policy:") + print_policy(greedy_policy, grid) diff --git a/rl/approx_q_learning.py b/rl/approx_q_learning.py deleted file mode 100644 index c3c1a35d..00000000 --- a/rl/approx_q_learning.py +++ /dev/null @@ -1,190 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -import matplotlib.pyplot as plt -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy -from monte_carlo_es import max_dict -from sarsa import random_action, GAMMA, ALPHA, ALL_POSSIBLE_ACTIONS - -SA2IDX = {} -IDX = 0 - -class Model: - def __init__(self): - self.theta = np.random.randn(25) / np.sqrt(25) - # if we use 
SA2IDX, a one-hot encoding for every (s,a) pair - # in reality we wouldn't want to do this b/c we have just - # as many params as before - # print "D:", IDX - # self.theta = np.random.randn(IDX) / np.sqrt(IDX) - - def sa2x(self, s, a): - # NOTE: using just (r, c, r*c, u, d, l, r, 1) is not expressive enough - return np.array([ - s[0] - 1 if a == 'U' else 0, - s[1] - 1.5 if a == 'U' else 0, - (s[0]*s[1] - 3)/3 if a == 'U' else 0, - (s[0]*s[0] - 2)/2 if a == 'U' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'U' else 0, - 1 if a == 'U' else 0, - s[0] - 1 if a == 'D' else 0, - s[1] - 1.5 if a == 'D' else 0, - (s[0]*s[1] - 3)/3 if a == 'D' else 0, - (s[0]*s[0] - 2)/2 if a == 'D' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'D' else 0, - 1 if a == 'D' else 0, - s[0] - 1 if a == 'L' else 0, - s[1] - 1.5 if a == 'L' else 0, - (s[0]*s[1] - 3)/3 if a == 'L' else 0, - (s[0]*s[0] - 2)/2 if a == 'L' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'L' else 0, - 1 if a == 'L' else 0, - s[0] - 1 if a == 'R' else 0, - s[1] - 1.5 if a == 'R' else 0, - (s[0]*s[1] - 3)/3 if a == 'R' else 0, - (s[0]*s[0] - 2)/2 if a == 'R' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'R' else 0, - 1 if a == 'R' else 0, - 1 - ]) - # if we use SA2IDX, a one-hot encoding for every (s,a) pair - # in reality we wouldn't want to do this b/c we have just - # as many params as before - # x = np.zeros(len(self.theta)) - # idx = SA2IDX[s][a] - # x[idx] = 1 - # return x - - def predict(self, s, a): - x = self.sa2x(s, a) - return self.theta.dot(x) - - def grad(self, s, a): - return self.sa2x(s, a) - - -def getQs(model, s): - # we need Q(s,a) to choose an action - # i.e. a = argmax[a]{ Q(s,a) } - Qs = {} - for a in ALL_POSSIBLE_ACTIONS: - q_sa = model.predict(s, a) - Qs[a] = q_sa - return Qs - - -if __name__ == '__main__': - # NOTE: if we use the standard grid, there's a good chance we will end up with - # suboptimal policies - # e.g. - # --------------------------- - # R | R | R | | - # --------------------------- - # R* | | U | | - # --------------------------- - # U | R | U | L | - # since going R at (1,0) (shown with a *) incurs no cost, it's OK to keep doing that. - # we'll either end up staying in the same spot, or back to the start (2,0), at which - # point we whould then just go back up, or at (0,0), at which point we can continue - # on right. - # instead, let's penalize each movement so the agent will find a shorter route. 
- # - # grid = standard_grid() - grid = negative_grid(step_cost=-0.1) - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # no policy initialization, we will derive our policy from most recent Q - # enumerate all (s,a) pairs, each will have its own weight in our "dumb" model - # essentially each weight will be a measure of Q(s,a) itself - states = grid.all_states() - for s in states: - SA2IDX[s] = {} - for a in ALL_POSSIBLE_ACTIONS: - SA2IDX[s][a] = IDX - IDX += 1 - - # initialize model - model = Model() - - # repeat until convergence - t = 1.0 - t2 = 1.0 - deltas = [] - for it in range(20000): - if it % 100 == 0: - t += 0.01 - t2 += 0.01 - if it % 1000 == 0: - print("it:", it) - alpha = ALPHA / t2 - - # instead of 'generating' an epsiode, we will PLAY - # an episode within this loop - s = (2, 0) # start state - grid.set_state(s) - - # get Q(s) so we can choose the first action - Qs = getQs(model, s) - - # the first (s, r) tuple is the state we start in and 0 - # (since we don't get a reward) for simply starting the game - # the last (s, r) tuple is the terminal state and the final reward - # the value for the terminal state is by definition 0, so we don't - # care about updating it. - a = max_dict(Qs)[0] - a = random_action(a, eps=0.5/t) # epsilon-greedy - biggest_change = 0 - while not grid.game_over(): - r = grid.move(a) - s2 = grid.current_state() - - # we need the next action as well since Q(s,a) depends on Q(s',a') - # if s2 not in policy then it's a terminal state, all Q are 0 - old_theta = model.theta.copy() - if grid.is_terminal(s2): - model.theta += alpha*(r - model.predict(s, a))*model.grad(s, a) - else: - # not terminal - Qs2 = getQs(model, s2) - a2, maxQs2a2 = max_dict(Qs2) - a2 = random_action(a2, eps=0.5/t) # epsilon-greedy - - # we will update Q(s,a) AS we experience the episode - model.theta += alpha*(r + GAMMA*maxQs2a2 - model.predict(s, a))*model.grad(s, a) - - # next state becomes current state - s = s2 - a = a2 - - biggest_change = max(biggest_change, np.abs(model.theta - old_theta).sum()) - deltas.append(biggest_change) - - plt.plot(deltas) - plt.show() - - # determine the policy from Q* - # find V* from Q* - policy = {} - V = {} - Q = {} - for s in grid.actions.keys(): - Qs = getQs(model, s) - Q[s] = Qs - a, max_q = max_dict(Qs) - policy[s] = a - V[s] = max_q - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) diff --git a/rl/approx_semigradient_sarsa_control.py b/rl/approx_semigradient_sarsa_control.py deleted file mode 100644 index c7ce0a79..00000000 --- a/rl/approx_semigradient_sarsa_control.py +++ /dev/null @@ -1,190 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -import matplotlib.pyplot as plt -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy -from monte_carlo_es import max_dict -from sarsa import random_action, GAMMA, ALPHA, ALL_POSSIBLE_ACTIONS - -SA2IDX = {} -IDX = 0 - -class Model: - def __init__(self): - self.theta = np.random.randn(25) / np.sqrt(25) - # if we use SA2IDX, a one-hot encoding for every (s,a) pair - # in reality we wouldn't want to do this b/c we have just - # as many params as before - # 
print "D:", IDX - # self.theta = np.random.randn(IDX) / np.sqrt(IDX) - - def sa2x(self, s, a): - # NOTE: using just (r, c, r*c, u, d, l, r, 1) is not expressive enough - return np.array([ - s[0] - 1 if a == 'U' else 0, - s[1] - 1.5 if a == 'U' else 0, - (s[0]*s[1] - 3)/3 if a == 'U' else 0, - (s[0]*s[0] - 2)/2 if a == 'U' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'U' else 0, - 1 if a == 'U' else 0, - s[0] - 1 if a == 'D' else 0, - s[1] - 1.5 if a == 'D' else 0, - (s[0]*s[1] - 3)/3 if a == 'D' else 0, - (s[0]*s[0] - 2)/2 if a == 'D' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'D' else 0, - 1 if a == 'D' else 0, - s[0] - 1 if a == 'L' else 0, - s[1] - 1.5 if a == 'L' else 0, - (s[0]*s[1] - 3)/3 if a == 'L' else 0, - (s[0]*s[0] - 2)/2 if a == 'L' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'L' else 0, - 1 if a == 'L' else 0, - s[0] - 1 if a == 'R' else 0, - s[1] - 1.5 if a == 'R' else 0, - (s[0]*s[1] - 3)/3 if a == 'R' else 0, - (s[0]*s[0] - 2)/2 if a == 'R' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'R' else 0, - 1 if a == 'R' else 0, - 1 - ]) - # if we use SA2IDX, a one-hot encoding for every (s,a) pair - # in reality we wouldn't want to do this b/c we have just - # as many params as before - # x = np.zeros(len(self.theta)) - # idx = SA2IDX[s][a] - # x[idx] = 1 - # return x - - def predict(self, s, a): - x = self.sa2x(s, a) - return self.theta.dot(x) - - def grad(self, s, a): - return self.sa2x(s, a) - - -def getQs(model, s): - # we need Q(s,a) to choose an action - # i.e. a = argmax[a]{ Q(s,a) } - Qs = {} - for a in ALL_POSSIBLE_ACTIONS: - q_sa = model.predict(s, a) - Qs[a] = q_sa - return Qs - - -if __name__ == '__main__': - # NOTE: if we use the standard grid, there's a good chance we will end up with - # suboptimal policies - # e.g. - # --------------------------- - # R | R | R | | - # --------------------------- - # R* | | U | | - # --------------------------- - # U | R | U | L | - # since going R at (1,0) (shown with a *) incurs no cost, it's OK to keep doing that. - # we'll either end up staying in the same spot, or back to the start (2,0), at which - # point we whould then just go back up, or at (0,0), at which point we can continue - # on right. - # instead, let's penalize each movement so the agent will find a shorter route. - # - # grid = standard_grid() - grid = negative_grid(step_cost=-0.1) - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # no policy initialization, we will derive our policy from most recent Q - # enumerate all (s,a) pairs, each will have its own weight in our "dumb" model - # essentially each weight will be a measure of Q(s,a) itself - states = grid.all_states() - for s in states: - SA2IDX[s] = {} - for a in ALL_POSSIBLE_ACTIONS: - SA2IDX[s][a] = IDX - IDX += 1 - - # initialize model - model = Model() - - # repeat until convergence - t = 1.0 - t2 = 1.0 - deltas = [] - for it in range(20000): - if it % 100 == 0: - t += 0.01 - t2 += 0.01 - if it % 1000 == 0: - print("it:", it) - alpha = ALPHA / t2 - - # instead of 'generating' an epsiode, we will PLAY - # an episode within this loop - s = (2, 0) # start state - grid.set_state(s) - - # get Q(s) so we can choose the first action - Qs = getQs(model, s) - - # the first (s, r) tuple is the state we start in and 0 - # (since we don't get a reward) for simply starting the game - # the last (s, r) tuple is the terminal state and the final reward - # the value for the terminal state is by definition 0, so we don't - # care about updating it. 
- a = max_dict(Qs)[0] - a = random_action(a, eps=0.5/t) # epsilon-greedy - biggest_change = 0 - while not grid.game_over(): - r = grid.move(a) - s2 = grid.current_state() - - # we need the next action as well since Q(s,a) depends on Q(s',a') - # if s2 not in policy then it's a terminal state, all Q are 0 - old_theta = model.theta.copy() - if grid.is_terminal(s2): - model.theta += alpha*(r - model.predict(s, a))*model.grad(s, a) - else: - # not terminal - Qs2 = getQs(model, s2) - a2 = max_dict(Qs2)[0] - a2 = random_action(a2, eps=0.5/t) # epsilon-greedy - - # we will update Q(s,a) AS we experience the episode - model.theta += alpha*(r + GAMMA*model.predict(s2, a2) - model.predict(s, a))*model.grad(s, a) - - # next state becomes current state - s = s2 - a = a2 - - biggest_change = max(biggest_change, np.abs(model.theta - old_theta).sum()) - deltas.append(biggest_change) - - plt.plot(deltas) - plt.show() - - # determine the policy from Q* - # find V* from Q* - policy = {} - V = {} - Q = {} - for s in grid.actions.keys(): - Qs = getQs(model, s) - Q[s] = Qs - a, max_q = max_dict(Qs) - policy[s] = a - V[s] = max_q - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) diff --git a/rl/approx_semigradient_td0_prediction.py b/rl/approx_semigradient_td0_prediction.py deleted file mode 100644 index ea9430be..00000000 --- a/rl/approx_semigradient_td0_prediction.py +++ /dev/null @@ -1,101 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -import matplotlib.pyplot as plt -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy -from td0_prediction import play_game, SMALL_ENOUGH, GAMMA, ALPHA, ALL_POSSIBLE_ACTIONS - -# NOTE: this is only policy evaluation, not optimization - -class Model: - def __init__(self): - self.theta = np.random.randn(4) / 2 - - def s2x(self, s): - return np.array([s[0] - 1, s[1] - 1.5, s[0]*s[1] - 3, 1]) - - def predict(self, s): - x = self.s2x(s) - return self.theta.dot(x) - - def grad(self, s): - return self.s2x(s) - - -if __name__ == '__main__': - # use the standard grid again (0 for every step) so that we can compare - # to iterative policy evaluation - grid = standard_grid() - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # state -> action - policy = { - (2, 0): 'U', - (1, 0): 'U', - (0, 0): 'R', - (0, 1): 'R', - (0, 2): 'R', - (1, 2): 'R', - (2, 1): 'R', - (2, 2): 'R', - (2, 3): 'U', - } - - model = Model() - deltas = [] - - # repeat until convergence - k = 1.0 - for it in range(20000): - if it % 10 == 0: - k += 0.01 - alpha = ALPHA/k - biggest_change = 0 - - # generate an episode using pi - states_and_rewards = play_game(grid, policy) - # the first (s, r) tuple is the state we start in and 0 - # (since we don't get a reward) for simply starting the game - # the last (s, r) tuple is the terminal state and the final reward - # the value for the terminal state is by definition 0, so we don't - # care about updating it. 
- for t in range(len(states_and_rewards) - 1): - s, _ = states_and_rewards[t] - s2, r = states_and_rewards[t+1] - # we will update V(s) AS we experience the episode - old_theta = model.theta.copy() - if grid.is_terminal(s2): - target = r - else: - target = r + GAMMA*model.predict(s2) - model.theta += alpha*(target - model.predict(s))*model.grad(s) - biggest_change = max(biggest_change, np.abs(old_theta - model.theta).sum()) - deltas.append(biggest_change) - - plt.plot(deltas) - plt.show() - - # obtain predicted values - V = {} - states = grid.all_states() - for s in states: - if s in grid.actions: - V[s] = model.predict(s) - else: - # terminal state or state we can't otherwise get to - V[s] = 0 - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) diff --git a/rl/bayesian_bandit.py b/rl/bayesian_bandit.py new file mode 100644 index 00000000..61e8f812 --- /dev/null +++ b/rl/bayesian_bandit.py @@ -0,0 +1,78 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np +from scipy.stats import beta + + +# np.random.seed(2) +NUM_TRIALS = 2000 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + self.p = p + self.a = 1 + self.b = 1 + self.N = 0 # for information only + + def pull(self): + return np.random.random() < self.p + + def sample(self): + return np.random.beta(self.a, self.b) + + def update(self, x): + self.a += x + self.b += 1 - x + self.N += 1 + + +def plot(bandits, trial): + x = np.linspace(0, 1, 200) + for b in bandits: + y = beta.pdf(x, b.a, b.b) + plt.plot(x, y, label=f"real p: {b.p:.4f}, win rate = {b.a - 1}/{b.N}") + plt.title(f"Bandit distributions after {trial} trials") + plt.legend() + plt.show() + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + sample_points = [5,10,20,50,100,200,500,1000,1500,1999] + rewards = np.zeros(NUM_TRIALS) + for i in range(NUM_TRIALS): + # Thompson sampling + j = np.argmax([b.sample() for b in bandits]) + + # plot the posteriors + if i in sample_points: + plot(bandits, i) + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + +if __name__ == "__main__": + experiment() diff --git a/rl/bayesian_normal.py b/rl/bayesian_normal.py new file mode 100644 index 00000000..07083d1b --- /dev/null +++ b/rl/bayesian_normal.py @@ -0,0 +1,84 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt +from scipy.stats import norm + + +np.random.seed(1) +NUM_TRIALS = 2000 +BANDIT_MEANS = [1, 2, 3] + + 
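The Beta-Bernoulli bandit added just above (rl/bayesian_bandit.py) boils down to three steps per trial: draw one sample from each arm's Beta(a, b) posterior, play the argmax, then fold the observed 0/1 reward back into that arm's (a, b). A standalone sketch with made-up win/loss counts:

import numpy as np

# hypothetical posteriors: prior Beta(1, 1) plus invented wins and losses
a = np.array([1 + 12, 1 + 3, 1 + 40], dtype=float)
b = np.array([1 + 30, 1 + 9, 1 + 25], dtype=float)

samples = np.random.beta(a, b)   # Thompson sampling: one draw per arm
j = int(np.argmax(samples))      # arm to play this trial
x = 1                            # pretend the pull paid out
a[j] += x                        # conjugate update: a += x, b += 1 - x
b[j] += 1 - x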
+class Bandit: + def __init__(self, true_mean): + self.true_mean = true_mean + # parameters for mu - prior is N(0,1) + self.m = 0 + self.lambda_ = 1 + self.tau = 1 + self.N = 0 + + def pull(self): + return np.random.randn() / np.sqrt(self.tau) + self.true_mean + + def sample(self): + return np.random.randn() / np.sqrt(self.lambda_) + self.m + + def update(self, x): + self.m = (self.tau * x + self.lambda_ * self.m) / (self.tau + self.lambda_) + self.lambda_ += self.tau + self.N += 1 + + +def plot(bandits, trial): + x = np.linspace(-3, 6, 200) + for b in bandits: + y = norm.pdf(x, b.m, np.sqrt(1. / b.lambda_)) + plt.plot(x, y, label=f"real mean: {b.true_mean:.4f}, num plays: {b.N}") + plt.title(f"Bandit distributions after {trial} trials") + plt.legend() + plt.show() + + +def run_experiment(): + bandits = [Bandit(m) for m in BANDIT_MEANS] + + sample_points = [5,10,20,50,100,200,500,1000,1500,1999] + rewards = np.empty(NUM_TRIALS) + for i in range(NUM_TRIALS): + # Thompson sampling + j = np.argmax([b.sample() for b in bandits]) + + # plot the posteriors + if i in sample_points: + plot(bandits, i) + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + # update rewards + rewards[i] = x + + cumulative_average = np.cumsum(rewards) / (np.arange(NUM_TRIALS) + 1) + + # plot moving average ctr + plt.plot(cumulative_average) + for m in BANDIT_MEANS: + plt.plot(np.ones(NUM_TRIALS)*m) + plt.show() + + return cumulative_average + +if __name__ == '__main__': + run_experiment() + + diff --git a/rl/bayesian_starter.py b/rl/bayesian_starter.py new file mode 100644 index 00000000..68e12f75 --- /dev/null +++ b/rl/bayesian_starter.py @@ -0,0 +1,78 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np +from scipy.stats import beta + + +# np.random.seed(2) +NUM_TRIALS = 2000 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + self.p = p + self.a = # TODO + self.b = # TODO + self.N = 0 # for information only + + def pull(self): + return np.random.random() < self.p + + def sample(self): + return # TODO - draw a sample from Beta(a, b) + + def update(self, x): + self.a = # TODO + self.b = # TODO + self.N += 1 + + +def plot(bandits, trial): + x = np.linspace(0, 1, 200) + for b in bandits: + y = beta.pdf(x, b.a, b.b) + plt.plot(x, y, label=f"real p: {b.p:.4f}, win rate = {b.a - 1}/{b.N}") + plt.title(f"Bandit distributions after {trial} trials") + plt.legend() + plt.show() + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + sample_points = [5,10,20,50,100,200,500,1000,1500,1999] + rewards = np.zeros(NUM_TRIALS) + for i in range(NUM_TRIALS): + # Thompson sampling + j = # TODO + + # plot the posteriors + if i in sample_points: + plot(bandits, i) + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win 
rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + +if __name__ == "__main__": + experiment() diff --git a/rl/cartpole.py b/rl/cartpole.py new file mode 100644 index 00000000..abb1b617 --- /dev/null +++ b/rl/cartpole.py @@ -0,0 +1,157 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import gym +import numpy as np +import matplotlib.pyplot as plt +from sklearn.kernel_approximation import RBFSampler + + +GAMMA = 0.99 +ALPHA = 0.1 + + +def epsilon_greedy(model, s, eps=0.1): + # we'll use epsilon-soft to ensure all states are visited + # what happens if you don't do this? i.e. eps=0 + p = np.random.random() + if p < (1 - eps): + values = model.predict_all_actions(s) + return np.argmax(values) + else: + return model.env.action_space.sample() + + +def gather_samples(env, n_episodes=10000): + samples = [] + for _ in range(n_episodes): + s, info = env.reset() + done = False + truncated = False + while not (done or truncated): + a = env.action_space.sample() + sa = np.concatenate((s, [a])) + samples.append(sa) + + s, r, done, truncated, info = env.step(a) + return samples + + +class Model: + def __init__(self, env): + # fit the featurizer to data + self.env = env + samples = gather_samples(env) + self.featurizer = RBFSampler() + self.featurizer.fit(samples) + dims = self.featurizer.n_components + + # initialize linear model weights + self.w = np.zeros(dims) + + def predict(self, s, a): + sa = np.concatenate((s, [a])) + x = self.featurizer.transform([sa])[0] + return x @ self.w + + def predict_all_actions(self, s): + return [self.predict(s, a) for a in range(self.env.action_space.n)] + + def grad(self, s, a): + sa = np.concatenate((s, [a])) + x = self.featurizer.transform([sa])[0] + return x + + +def test_agent(model, env, n_episodes=20): + reward_per_episode = np.zeros(n_episodes) + for it in range(n_episodes): + done = False + truncated = False + episode_reward = 0 + s, info = env.reset() + while not (done or truncated): + a = epsilon_greedy(model, s, eps=0) + s, r, done, truncated, info = env.step(a) + episode_reward += r + reward_per_episode[it] = episode_reward + return np.mean(reward_per_episode) + + +def watch_agent(model, env, eps): + done = False + truncated = False + episode_reward = 0 + s, info = env.reset() + while not (done or truncated): + a = epsilon_greedy(model, s, eps=eps) + s, r, done, truncated, info = env.step(a) + episode_reward += r + print("Episode reward:", episode_reward) + + +if __name__ == '__main__': + # instantiate environment + env = gym.make("CartPole-v1", render_mode="rgb_array") + + model = Model(env) + reward_per_episode = [] + + # watch untrained agent + watch_agent(model, env, eps=0) + + # repeat until convergence + n_episodes = 1500 + for it in range(n_episodes): + s, info = env.reset() + episode_reward = 0 + done = False + truncated = False + while not (done or truncated): + a = epsilon_greedy(model, s) + s2, r, done, truncated, info = env.step(a) + + # get the target + if done: + target = r + else: + values = model.predict_all_actions(s2) + target = r + GAMMA * np.max(values) + + # update the model + g = model.grad(s, a) + err = target - model.predict(s, a) + model.w += ALPHA * err * g + + # accumulate reward + 
episode_reward += r + + # update state + s = s2 + + if (it + 1) % 50 == 0: + print(f"Episode: {it + 1}, Reward: {episode_reward}") + + # early exit + if it > 20 and np.mean(reward_per_episode[-20:]) == 200: + print("Early exit") + break + + reward_per_episode.append(episode_reward) + + # test trained agent + test_reward = test_agent(model, env) + print(f"Average test reward: {test_reward}") + + plt.plot(reward_per_episode) + plt.title("Reward per episode") + plt.show() + + # watch trained agent + env = gym.make("CartPole-v1", render_mode="human") + watch_agent(model, env, eps=0) + diff --git a/rl/cartpole_gym0.19.py b/rl/cartpole_gym0.19.py new file mode 100644 index 00000000..2ef157b5 --- /dev/null +++ b/rl/cartpole_gym0.19.py @@ -0,0 +1,153 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import gym +import numpy as np +import matplotlib.pyplot as plt +from sklearn.kernel_approximation import RBFSampler + + +GAMMA = 0.99 +ALPHA = 0.1 + + +def epsilon_greedy(model, s, eps=0.1): + # we'll use epsilon-soft to ensure all states are visited + # what happens if you don't do this? i.e. eps=0 + p = np.random.random() + if p < (1 - eps): + values = model.predict_all_actions(s) + return np.argmax(values) + else: + return model.env.action_space.sample() + + +def gather_samples(env, n_episodes=10000): + samples = [] + for _ in range(n_episodes): + s = env.reset() + done = False + while not done: + a = env.action_space.sample() + sa = np.concatenate((s, [a])) + samples.append(sa) + + s, r, done, info = env.step(a) + return samples + + +class Model: + def __init__(self, env): + # fit the featurizer to data + self.env = env + samples = gather_samples(env) + self.featurizer = RBFSampler() + self.featurizer.fit(samples) + dims = self.featurizer.n_components + + # initialize linear model weights + self.w = np.zeros(dims) + + def predict(self, s, a): + sa = np.concatenate((s, [a])) + x = self.featurizer.transform([sa])[0] + return x @ self.w + + def predict_all_actions(self, s): + return [self.predict(s, a) for a in range(self.env.action_space.n)] + + def grad(self, s, a): + sa = np.concatenate((s, [a])) + x = self.featurizer.transform([sa])[0] + return x + + +def test_agent(model, env, n_episodes=20): + reward_per_episode = np.zeros(n_episodes) + for it in range(n_episodes): + done = False + episode_reward = 0 + s = env.reset() + while not done: + a = epsilon_greedy(model, s, eps=0) + s, r, done, info = env.step(a) + episode_reward += r + reward_per_episode[it] = episode_reward + return np.mean(reward_per_episode) + + +def watch_agent(model, env, eps): + done = False + episode_reward = 0 + s = env.reset() + while not done: + a = epsilon_greedy(model, s, eps=eps) + s, r, done, info = env.step(a) + env.render() + episode_reward += r + print("Episode reward:", episode_reward) + + +if __name__ == '__main__': + # instantiate environment + env = gym.make("CartPole-v0") + + model = Model(env) + reward_per_episode = [] + + # watch untrained agent + watch_agent(model, env, eps=0) + + # repeat until convergence + n_episodes = 1500 + for it in range(n_episodes): + s = env.reset() + episode_reward = 0 + done = False + while not done: + a = epsilon_greedy(model, s) + s2, r, done, info = env.step(a) + + # get the target + if 
done: + target = r + else: + values = model.predict_all_actions(s2) + target = r + GAMMA * np.max(values) + + # update the model + g = model.grad(s, a) + err = target - model.predict(s, a) + model.w += ALPHA * err * g + + # accumulate reward + episode_reward += r + + # update state + s = s2 + + if (it + 1) % 50 == 0: + print(f"Episode: {it + 1}, Reward: {episode_reward}") + + # early exit + if it > 20 and np.mean(reward_per_episode[-20:]) == 200: + print("Early exit") + break + + reward_per_episode.append(episode_reward) + + # test trained agent + test_reward = test_agent(model, env) + print(f"Average test reward: {test_reward}") + + plt.plot(reward_per_episode) + plt.title("Reward per episode") + plt.show() + + # watch trained agent + watch_agent(model, env, eps=0) + diff --git a/rl/epsilon_greedy.py b/rl/epsilon_greedy.py new file mode 100644 index 00000000..b906de88 --- /dev/null +++ b/rl/epsilon_greedy.py @@ -0,0 +1,88 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 0. + self.N = 0. # num samples collected so far + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N += 1. + self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + num_times_explored = 0 + num_times_exploited = 0 + num_optimal = 0 + optimal_j = np.argmax([b.p for b in bandits]) + print("optimal j:", optimal_j) + + for i in range(NUM_TRIALS): + + # use epsilon-greedy to select the next bandit + if np.random.random() < EPS: + num_times_explored += 1 + j = np.random.randint(len(bandits)) + else: + num_times_exploited += 1 + j = np.argmax([b.p_estimate for b in bandits]) + + if j == optimal_j: + num_optimal += 1 + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num_times_explored:", num_times_explored) + print("num_times_exploited:", num_times_exploited) + print("num times selected optimal bandit:", num_optimal) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() diff --git a/rl/epsilon_greedy_starter.py b/rl/epsilon_greedy_starter.py new file mode 100644 index 00000000..4bb9d278 --- /dev/null +++ b/rl/epsilon_greedy_starter.py @@ -0,0 +1,88 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# 
https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = # TODO + self.N = # TODO + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N = # TODO + self.p_estimate = # TODO + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + num_times_explored = 0 + num_times_exploited = 0 + num_optimal = 0 + optimal_j = np.argmax([b.p for b in bandits]) + print("optimal j:", optimal_j) + + for i in range(NUM_TRIALS): + + # use epsilon-greedy to select the next bandit + if np.random.random() < EPS: + num_times_explored += 1 + j = # TODO + else: + num_times_exploited += 1 + j = # TODO + + if j == optimal_j: + num_optimal += 1 + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num_times_explored:", num_times_explored) + print("num_times_exploited:", num_times_exploited) + print("num times selected optimal bandit:", num_optimal) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() diff --git a/rl/extra_reading.txt b/rl/extra_reading.txt index 81f52a48..fac79d64 100644 --- a/rl/extra_reading.txt +++ b/rl/extra_reading.txt @@ -1,6 +1,15 @@ +Finite-time Analysis of the Multiarmed Bandit Problem +https://homes.di.unimi.it/cesa-bianchi/Pubblicazioni/ml-02.pdf + +A Nice Lecture for Students Who Claim "RL Doesn't Use Math" +https://www.youtube.com/watch?v=dhEF5pfYmvc + Hacking Google reCAPTCHA v3 using Reinforcement Learning https://arxiv.org/pdf/1903.01003.pdf +Practical Deep Reinforcement Learning Approach for Stock Trading +https://arxiv.org/abs/1811.07522 + Reinforcement Learning: A Tutorial Survey and Recent Advances - Abhijit Gosavi http://web.mst.edu/~gosavia/joc.pdf diff --git a/rl/grid_world.py b/rl/grid_world.py index 891b5441..ff3c68af 100644 --- a/rl/grid_world.py +++ b/rl/grid_world.py @@ -5,10 +5,12 @@ # Note: you may need to update your version of future # sudo pip install -U future - import numpy as np +ACTION_SPACE = ('U', 'D', 'L', 'R') + + class Grid: # Environment def __init__(self, rows, cols, start): self.rows = rows @@ -32,6 +34,28 @@ def current_state(self): def is_terminal(self, s): return s not in self.actions + def reset(self): + # put agent back in start position + self.i = 2 + self.j = 0 + return (self.i, self.j) + + def get_next_state(self, s, a): + # this answers: where would I end up if I perform action 'a' in state 's'? 
+ i, j = s[0], s[1] + + # if this action moves you somewhere else, then it will be in this dictionary + if a in self.actions[(i, j)]: + if a == 'U': + i -= 1 + elif a == 'D': + i += 1 + elif a == 'R': + j += 1 + elif a == 'L': + j -= 1 + return i, j + def move(self, action): # check if legal move first if action in self.actions[(self.i, self.j)]: @@ -116,3 +140,231 @@ def negative_grid(step_cost=-0.1): }) return g + + + + +class WindyGrid: + def __init__(self, rows, cols, start): + self.rows = rows + self.cols = cols + self.i = start[0] + self.j = start[1] + + def set(self, rewards, actions, probs): + # rewards should be a dict of: (i, j): r (row, col): reward + # actions should be a dict of: (i, j): A (row, col): list of possible actions + self.rewards = rewards + self.actions = actions + self.probs = probs + + def set_state(self, s): + self.i = s[0] + self.j = s[1] + + def current_state(self): + return (self.i, self.j) + + def is_terminal(self, s): + return s not in self.actions + + def move(self, action): + s = (self.i, self.j) + a = action + + next_state_probs = self.probs[(s, a)] + next_states = list(next_state_probs.keys()) + next_probs = list(next_state_probs.values()) + next_state_idx = np.random.choice(len(next_states), p=next_probs) + s2 = next_states[next_state_idx] + + # update the current state + self.i, self.j = s2 + + # return a reward (if any) + return self.rewards.get(s2, 0) + + def game_over(self): + # returns true if game is over, else false + # true if we are in a state where no actions are possible + return (self.i, self.j) not in self.actions + + def all_states(self): + # possibly buggy but simple way to get all states + # either a position that has possible next actions + # or a position that yields a reward + return set(self.actions.keys()) | set(self.rewards.keys()) + + +def windy_grid(): + g = WindyGrid(3, 4, (2, 0)) + rewards = {(0, 3): 1, (1, 3): -1} + actions = { + (0, 0): ('D', 'R'), + (0, 1): ('L', 'R'), + (0, 2): ('L', 'D', 'R'), + (1, 0): ('U', 'D'), + (1, 2): ('U', 'D', 'R'), + (2, 0): ('U', 'R'), + (2, 1): ('L', 'R'), + (2, 2): ('L', 'R', 'U'), + (2, 3): ('L', 'U'), + } + + # p(s' | s, a) represented as: + # KEY: (s, a) --> VALUE: {s': p(s' | s, a)} + probs = { + ((2, 0), 'U'): {(1, 0): 1.0}, + ((2, 0), 'D'): {(2, 0): 1.0}, + ((2, 0), 'L'): {(2, 0): 1.0}, + ((2, 0), 'R'): {(2, 1): 1.0}, + ((1, 0), 'U'): {(0, 0): 1.0}, + ((1, 0), 'D'): {(2, 0): 1.0}, + ((1, 0), 'L'): {(1, 0): 1.0}, + ((1, 0), 'R'): {(1, 0): 1.0}, + ((0, 0), 'U'): {(0, 0): 1.0}, + ((0, 0), 'D'): {(1, 0): 1.0}, + ((0, 0), 'L'): {(0, 0): 1.0}, + ((0, 0), 'R'): {(0, 1): 1.0}, + ((0, 1), 'U'): {(0, 1): 1.0}, + ((0, 1), 'D'): {(0, 1): 1.0}, + ((0, 1), 'L'): {(0, 0): 1.0}, + ((0, 1), 'R'): {(0, 2): 1.0}, + ((0, 2), 'U'): {(0, 2): 1.0}, + ((0, 2), 'D'): {(1, 2): 1.0}, + ((0, 2), 'L'): {(0, 1): 1.0}, + ((0, 2), 'R'): {(0, 3): 1.0}, + ((2, 1), 'U'): {(2, 1): 1.0}, + ((2, 1), 'D'): {(2, 1): 1.0}, + ((2, 1), 'L'): {(2, 0): 1.0}, + ((2, 1), 'R'): {(2, 2): 1.0}, + ((2, 2), 'U'): {(1, 2): 1.0}, + ((2, 2), 'D'): {(2, 2): 1.0}, + ((2, 2), 'L'): {(2, 1): 1.0}, + ((2, 2), 'R'): {(2, 3): 1.0}, + ((2, 3), 'U'): {(1, 3): 1.0}, + ((2, 3), 'D'): {(2, 3): 1.0}, + ((2, 3), 'L'): {(2, 2): 1.0}, + ((2, 3), 'R'): {(2, 3): 1.0}, + ((1, 2), 'U'): {(0, 2): 0.5, (1, 3): 0.5}, + ((1, 2), 'D'): {(2, 2): 1.0}, + ((1, 2), 'L'): {(1, 2): 1.0}, + ((1, 2), 'R'): {(1, 3): 1.0}, + } + g.set(rewards, actions, probs) + return g + + +def windy_grid_no_wind(): + g = windy_grid() + g.probs[((1, 2), 'U')] = {(0, 2): 1.0} + return g + + 
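As an illustration of how WindyGrid consumes the probs dictionary: move() looks up {s': p(s' | s, a)} for the current state-action pair and samples the next state from it. A standalone sketch using the one genuinely stochastic entry defined above, the wind at state (1, 2) under action 'U':

import numpy as np

next_state_probs = {(0, 2): 0.5, (1, 3): 0.5}    # p(s' | s=(1, 2), a='U') from windy_grid()
next_states = list(next_state_probs.keys())
next_probs = list(next_state_probs.values())
idx = np.random.choice(len(next_states), p=next_probs)
s2 = next_states[idx]    # half the time the wind blows you into the losing state (1, 3)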
+ +def windy_grid_penalized(step_cost=-0.1): + g = WindyGrid(3, 4, (2, 0)) + rewards = { + (0, 0): step_cost, + (0, 1): step_cost, + (0, 2): step_cost, + (1, 0): step_cost, + (1, 2): step_cost, + (2, 0): step_cost, + (2, 1): step_cost, + (2, 2): step_cost, + (2, 3): step_cost, + (0, 3): 1, + (1, 3): -1 + } + actions = { + (0, 0): ('D', 'R'), + (0, 1): ('L', 'R'), + (0, 2): ('L', 'D', 'R'), + (1, 0): ('U', 'D'), + (1, 2): ('U', 'D', 'R'), + (2, 0): ('U', 'R'), + (2, 1): ('L', 'R'), + (2, 2): ('L', 'R', 'U'), + (2, 3): ('L', 'U'), + } + + # p(s' | s, a) represented as: + # KEY: (s, a) --> VALUE: {s': p(s' | s, a)} + probs = { + ((2, 0), 'U'): {(1, 0): 1.0}, + ((2, 0), 'D'): {(2, 0): 1.0}, + ((2, 0), 'L'): {(2, 0): 1.0}, + ((2, 0), 'R'): {(2, 1): 1.0}, + ((1, 0), 'U'): {(0, 0): 1.0}, + ((1, 0), 'D'): {(2, 0): 1.0}, + ((1, 0), 'L'): {(1, 0): 1.0}, + ((1, 0), 'R'): {(1, 0): 1.0}, + ((0, 0), 'U'): {(0, 0): 1.0}, + ((0, 0), 'D'): {(1, 0): 1.0}, + ((0, 0), 'L'): {(0, 0): 1.0}, + ((0, 0), 'R'): {(0, 1): 1.0}, + ((0, 1), 'U'): {(0, 1): 1.0}, + ((0, 1), 'D'): {(0, 1): 1.0}, + ((0, 1), 'L'): {(0, 0): 1.0}, + ((0, 1), 'R'): {(0, 2): 1.0}, + ((0, 2), 'U'): {(0, 2): 1.0}, + ((0, 2), 'D'): {(1, 2): 1.0}, + ((0, 2), 'L'): {(0, 1): 1.0}, + ((0, 2), 'R'): {(0, 3): 1.0}, + ((2, 1), 'U'): {(2, 1): 1.0}, + ((2, 1), 'D'): {(2, 1): 1.0}, + ((2, 1), 'L'): {(2, 0): 1.0}, + ((2, 1), 'R'): {(2, 2): 1.0}, + ((2, 2), 'U'): {(1, 2): 1.0}, + ((2, 2), 'D'): {(2, 2): 1.0}, + ((2, 2), 'L'): {(2, 1): 1.0}, + ((2, 2), 'R'): {(2, 3): 1.0}, + ((2, 3), 'U'): {(1, 3): 1.0}, + ((2, 3), 'D'): {(2, 3): 1.0}, + ((2, 3), 'L'): {(2, 2): 1.0}, + ((2, 3), 'R'): {(2, 3): 1.0}, + ((1, 2), 'U'): {(0, 2): 0.5, (1, 3): 0.5}, + ((1, 2), 'D'): {(2, 2): 1.0}, + ((1, 2), 'L'): {(1, 2): 1.0}, + ((1, 2), 'R'): {(1, 3): 1.0}, + } + g.set(rewards, actions, probs) + return g + + + +def grid_5x5(step_cost=-0.1): + g = Grid(5, 5, (4, 0)) + rewards = {(0, 4): 1, (1, 4): -1} + actions = { + (0, 0): ('D', 'R'), + (0, 1): ('L', 'R'), + (0, 2): ('L', 'R'), + (0, 3): ('L', 'D', 'R'), + (1, 0): ('U', 'D', 'R'), + (1, 1): ('U', 'D', 'L'), + (1, 3): ('U', 'D', 'R'), + (2, 0): ('U', 'D', 'R'), + (2, 1): ('U', 'L', 'R'), + (2, 2): ('L', 'R', 'D'), + (2, 3): ('L', 'R', 'U'), + (2, 4): ('L', 'U', 'D'), + (3, 0): ('U', 'D'), + (3, 2): ('U', 'D'), + (3, 4): ('U', 'D'), + (4, 0): ('U', 'R'), + (4, 1): ('L', 'R'), + (4, 2): ('L', 'R', 'U'), + (4, 3): ('L', 'R'), + (4, 4): ('L', 'U'), + } + g.set(rewards, actions) + + # non-terminal states + visitable_states = actions.keys() + for s in visitable_states: + g.rewards[s] = step_cost + + return g + diff --git a/rl/iterative_policy_evaluation.py b/rl/iterative_policy_evaluation.py deleted file mode 100644 index fea8f438..00000000 --- a/rl/iterative_policy_evaluation.py +++ /dev/null @@ -1,120 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -from grid_world import standard_grid - -SMALL_ENOUGH = 1e-3 # threshold for convergence - -def print_values(V, g): - for i in range(g.rows): - print("---------------------------") - for j in range(g.cols): - v = V.get((i,j), 0) - if v >= 0: - print(" %.2f|" % v, end="") - else: - print("%.2f|" % v, end="") # -ve sign takes up an extra space - print("") - - -def 
print_policy(P, g): - for i in range(g.rows): - print("---------------------------") - for j in range(g.cols): - a = P.get((i,j), ' ') - print(" %s |" % a, end="") - print("") - -if __name__ == '__main__': - # iterative policy evaluation - # given a policy, let's find it's value function V(s) - # we will do this for both a uniform random policy and fixed policy - # NOTE: - # there are 2 sources of randomness - # p(a|s) - deciding what action to take given the state - # p(s',r|s,a) - the next state and reward given your action-state pair - # we are only modeling p(a|s) = uniform - # how would the code change if p(s',r|s,a) is not deterministic? - grid = standard_grid() - - # states will be positions (i,j) - # simpler than tic-tac-toe because we only have one "game piece" - # that can only be at one position at a time - states = grid.all_states() - - ### uniformly random actions ### - # initialize V(s) = 0 - V = {} - for s in states: - V[s] = 0 - gamma = 1.0 # discount factor - # repeat until convergence - while True: - biggest_change = 0 - for s in states: - old_v = V[s] - - # V(s) only has value if it's not a terminal state - if s in grid.actions: - - new_v = 0 # we will accumulate the answer - p_a = 1.0 / len(grid.actions[s]) # each action has equal probability - for a in grid.actions[s]: - grid.set_state(s) - r = grid.move(a) - new_v += p_a * (r + gamma * V[grid.current_state()]) - V[s] = new_v - biggest_change = max(biggest_change, np.abs(old_v - V[s])) - - if biggest_change < SMALL_ENOUGH: - break - print("values for uniformly random actions:") - print_values(V, grid) - print("\n\n") - - ### fixed policy ### - policy = { - (2, 0): 'U', - (1, 0): 'U', - (0, 0): 'R', - (0, 1): 'R', - (0, 2): 'R', - (1, 2): 'R', - (2, 1): 'R', - (2, 2): 'R', - (2, 3): 'U', - } - print_policy(policy, grid) - - # initialize V(s) = 0 - V = {} - for s in states: - V[s] = 0 - - # let's see how V(s) changes as we get further away from the reward - gamma = 0.9 # discount factor - - # repeat until convergence - while True: - biggest_change = 0 - for s in states: - old_v = V[s] - - # V(s) only has value if it's not a terminal state - if s in policy: - a = policy[s] - grid.set_state(s) - r = grid.move(a) - V[s] = r + gamma * V[grid.current_state()] - biggest_change = max(biggest_change, np.abs(old_v - V[s])) - - if biggest_change < SMALL_ENOUGH: - break - print("values for fixed policy:") - print_values(V, grid) diff --git a/rl/iterative_policy_evaluation_deterministic.py b/rl/iterative_policy_evaluation_deterministic.py new file mode 100644 index 00000000..06ddc479 --- /dev/null +++ b/rl/iterative_policy_evaluation_deterministic.py @@ -0,0 +1,111 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +from grid_world import standard_grid, ACTION_SPACE + +SMALL_ENOUGH = 1e-3 # threshold for convergence + + +def print_values(V, g): + for i in range(g.rows): + print("---------------------------") + for j in range(g.cols): + v = V.get((i,j), 0) + if v >= 0: + print(" %.2f|" % v, end="") + else: + print("%.2f|" % v, end="") # -ve sign takes up an extra space + print("") + + +def print_policy(P, g): + for i in range(g.rows): + print("---------------------------") + for j in range(g.cols): + a = P.get((i,j), ' ') + 
print(" %s |" % a, end="") + print("") + + + +if __name__ == '__main__': + + ### define transition probabilities and grid ### + # the key is (s, a, s'), the value is the probability + # that is, transition_probs[(s, a, s')] = p(s' | s, a) + # any key NOT present will considered to be impossible (i.e. probability 0) + transition_probs = {} + + # to reduce the dimensionality of the dictionary, we'll use deterministic + # rewards, r(s, a, s') + # note: you could make it simpler by using r(s') since the reward doesn't + # actually depend on (s, a) + rewards = {} + + grid = standard_grid() + for i in range(grid.rows): + for j in range(grid.cols): + s = (i, j) + if not grid.is_terminal(s): + for a in ACTION_SPACE: + s2 = grid.get_next_state(s, a) + transition_probs[(s, a, s2)] = 1 + if s2 in grid.rewards: + rewards[(s, a, s2)] = grid.rewards[s2] + + ### fixed policy ### + policy = { + (2, 0): 'U', + (1, 0): 'U', + (0, 0): 'R', + (0, 1): 'R', + (0, 2): 'R', + (1, 2): 'U', + (2, 1): 'R', + (2, 2): 'U', + (2, 3): 'L', + } + print_policy(policy, grid) + + # initialize V(s) = 0 + V = {} + for s in grid.all_states(): + V[s] = 0 + + gamma = 0.9 # discount factor + + # repeat until convergence + it = 0 + while True: + biggest_change = 0 + for s in grid.all_states(): + if not grid.is_terminal(s): + old_v = V[s] + new_v = 0 # we will accumulate the answer + for a in ACTION_SPACE: + for s2 in grid.all_states(): + + # action probability is deterministic + action_prob = 1 if policy.get(s) == a else 0 + + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + new_v += action_prob * transition_probs.get((s, a, s2), 0) * (r + gamma * V[s2]) + + # after done getting the new value, update the value table + V[s] = new_v + biggest_change = max(biggest_change, np.abs(old_v - V[s])) + + print("iter:", it, "biggest_change:", biggest_change) + print_values(V, grid) + it += 1 + + if biggest_change < SMALL_ENOUGH: + break + print("\n\n") diff --git a/rl/iterative_policy_evaluation_probabilistic.py b/rl/iterative_policy_evaluation_probabilistic.py new file mode 100644 index 00000000..07d019c0 --- /dev/null +++ b/rl/iterative_policy_evaluation_probabilistic.py @@ -0,0 +1,112 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +from grid_world import windy_grid, ACTION_SPACE + +SMALL_ENOUGH = 1e-3 # threshold for convergence + + +def print_values(V, g): + for i in range(g.rows): + print("---------------------------") + for j in range(g.cols): + v = V.get((i,j), 0) + if v >= 0: + print(" %.2f|" % v, end="") + else: + print("%.2f|" % v, end="") # -ve sign takes up an extra space + print("") + + +def print_policy(P, g): + for i in range(g.rows): + print("---------------------------") + for j in range(g.cols): + a = P.get((i,j), ' ') + print(" %s |" % a, end="") + print("") + + + +if __name__ == '__main__': + + ### define transition probabilities and grid ### + # the key is (s, a, s'), the value is the probability + # that is, transition_probs[(s, a, s')] = p(s' | s, a) + # any key NOT present will considered to be impossible (i.e. 
probability 0) + # we can take this from the grid object and convert it to the format we want + transition_probs = {} + + # to reduce the dimensionality of the dictionary, we'll use deterministic + # rewards, r(s, a, s') + # note: you could make it simpler by using r(s') since the reward doesn't + # actually depend on (s, a) + rewards = {} + + grid = windy_grid() + for (s, a), v in grid.probs.items(): + for s2, p in v.items(): + transition_probs[(s, a, s2)] = p + rewards[(s, a, s2)] = grid.rewards.get(s2, 0) + + ### probabilistic policy ### + policy = { + (2, 0): {'U': 0.5, 'R': 0.5}, + (1, 0): {'U': 1.0}, + (0, 0): {'R': 1.0}, + (0, 1): {'R': 1.0}, + (0, 2): {'R': 1.0}, + (1, 2): {'U': 1.0}, + (2, 1): {'R': 1.0}, + (2, 2): {'U': 1.0}, + (2, 3): {'L': 1.0}, + } + print_policy(policy, grid) + + # initialize V(s) = 0 + V = {} + for s in grid.all_states(): + V[s] = 0 + + gamma = 0.9 # discount factor + + # repeat until convergence + it = 0 + while True: + biggest_change = 0 + for s in grid.all_states(): + if not grid.is_terminal(s): + old_v = V[s] + new_v = 0 # we will accumulate the answer + for a in ACTION_SPACE: + for s2 in grid.all_states(): + + # action probability is deterministic + action_prob = policy[s].get(a, 0) + + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + new_v += action_prob * transition_probs.get((s, a, s2), 0) * (r + gamma * V[s2]) + + # after done getting the new value, update the value table + V[s] = new_v + biggest_change = max(biggest_change, np.abs(old_v - V[s])) + + print("iter:", it, "biggest_change:", biggest_change) + print_values(V, grid) + it += 1 + + if biggest_change < SMALL_ENOUGH: + break + print("V:", V) + print("\n\n") + + # sanity check + # at state (1, 2), value is 0.5 * 0.9 * 1 + 0.5 * (-1) = -0.05 + diff --git a/rl/linear_rl_trader.py b/rl/linear_rl_trader.py new file mode 100644 index 00000000..9bda27e7 --- /dev/null +++ b/rl/linear_rl_trader.py @@ -0,0 +1,385 @@ +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +from datetime import datetime +import itertools +import argparse +import re +import os +import pickle + +from sklearn.preprocessing import StandardScaler + + +# Let's use AAPL (Apple), MSI (Motorola), SBUX (Starbucks) +def get_data(): + # returns a T x 3 list of stock prices + # each row is a different stock + # 0 = AAPL + # 1 = MSI + # 2 = SBUX + df = pd.read_csv('../tf2.0/aapl_msi_sbux.csv') + return df.values + + + + + +def get_scaler(env): + # return scikit-learn scaler object to scale the states + # Note: you could also populate the replay buffer here + + states = [] + for _ in range(env.n_step): + action = np.random.choice(env.action_space) + state, reward, done, info = env.step(action) + states.append(state) + if done: + break + + scaler = StandardScaler() + scaler.fit(states) + return scaler + + + + +def maybe_make_dir(directory): + if not os.path.exists(directory): + os.makedirs(directory) + + + +class LinearModel: + """ A linear regression model """ + def __init__(self, input_dim, n_action): + self.W = np.random.randn(input_dim, n_action) / np.sqrt(input_dim) + self.b = np.zeros(n_action) + + # momentum terms + self.vW = 0 + self.vb = 0 + + self.losses = [] + + def predict(self, X): + # make sure X is N x D + assert(len(X.shape) == 2) + return X.dot(self.W) + self.b + + def sgd(self, X, Y, learning_rate=0.01, momentum=0.9): + # make sure X is N x D + assert(len(X.shape) == 2) + + # the loss values are 2-D + # normally we would divide by N only + # but now we 
divide by N x K + num_values = np.prod(Y.shape) + + # do one step of gradient descent + # we multiply by 2 to get the exact gradient + # (not adjusting the learning rate) + # i.e. d/dx (x^2) --> 2x + Yhat = self.predict(X) + gW = 2 * X.T.dot(Yhat - Y) / num_values + gb = 2 * (Yhat - Y).sum(axis=0) / num_values + + # update momentum terms + self.vW = momentum * self.vW - learning_rate * gW + self.vb = momentum * self.vb - learning_rate * gb + + # update params + self.W += self.vW + self.b += self.vb + + mse = np.mean((Yhat - Y)**2) + self.losses.append(mse) + + def load_weights(self, filepath): + npz = np.load(filepath) + self.W = npz['W'] + self.b = npz['b'] + + def save_weights(self, filepath): + np.savez(filepath, W=self.W, b=self.b) + + + + +class MultiStockEnv: + """ + A 3-stock trading environment. + State: vector of size 7 (n_stock * 2 + 1) + - # shares of stock 1 owned + - # shares of stock 2 owned + - # shares of stock 3 owned + - price of stock 1 (using daily close price) + - price of stock 2 + - price of stock 3 + - cash owned (can be used to purchase more stocks) + Action: categorical variable with 27 (3^3) possibilities + - for each stock, you can: + - 0 = sell + - 1 = hold + - 2 = buy + """ + def __init__(self, data, initial_investment=20000): + # data + self.stock_price_history = data + self.n_step, self.n_stock = self.stock_price_history.shape + + # instance attributes + self.initial_investment = initial_investment + self.cur_step = None + self.stock_owned = None + self.stock_price = None + self.cash_in_hand = None + + self.action_space = np.arange(3**self.n_stock) + + # action permutations + # returns a nested list with elements like: + # [0,0,0] + # [0,0,1] + # [0,0,2] + # [0,1,0] + # [0,1,1] + # etc. + # 0 = sell + # 1 = hold + # 2 = buy + self.action_list = list(map(list, itertools.product([0, 1, 2], repeat=self.n_stock))) + + # calculate size of state + self.state_dim = self.n_stock * 2 + 1 + + self.reset() + + + def reset(self): + self.cur_step = 0 + self.stock_owned = np.zeros(self.n_stock) + self.stock_price = self.stock_price_history[self.cur_step] + self.cash_in_hand = self.initial_investment + return self._get_obs() + + + def step(self, action): + assert action in self.action_space + + # get current value before performing the action + prev_val = self._get_val() + + # update price, i.e. go to the next day + self.cur_step += 1 + self.stock_price = self.stock_price_history[self.cur_step] + + # perform the trade + self._trade(action) + + # get the new value after taking the action + cur_val = self._get_val() + + # reward is the increase in porfolio value + reward = cur_val - prev_val + + # done if we have run out of data + done = self.cur_step == self.n_step - 1 + + # store the current value of the portfolio here + info = {'cur_val': cur_val} + + # conform to the Gym API + return self._get_obs(), reward, done, info + + + def _get_obs(self): + obs = np.empty(self.state_dim) + obs[:self.n_stock] = self.stock_owned + obs[self.n_stock:2*self.n_stock] = self.stock_price + obs[-1] = self.cash_in_hand + return obs + + + + def _get_val(self): + return self.stock_owned.dot(self.stock_price) + self.cash_in_hand + + + def _trade(self, action): + # index the action we want to perform + # 0 = sell + # 1 = hold + # 2 = buy + # e.g. 
[2,1,0] means: + # buy first stock + # hold second stock + # sell third stock + action_vec = self.action_list[action] + + # determine which stocks to buy or sell + sell_index = [] # stores index of stocks we want to sell + buy_index = [] # stores index of stocks we want to buy + for i, a in enumerate(action_vec): + if a == 0: + sell_index.append(i) + elif a == 2: + buy_index.append(i) + + # sell any stocks we want to sell + # then buy any stocks we want to buy + if sell_index: + # NOTE: to simplify the problem, when we sell, we will sell ALL shares of that stock + for i in sell_index: + self.cash_in_hand += self.stock_price[i] * self.stock_owned[i] + self.stock_owned[i] = 0 + if buy_index: + # NOTE: when buying, we will loop through each stock we want to buy, + # and buy one share at a time until we run out of cash + can_buy = True + while can_buy: + for i in buy_index: + if self.cash_in_hand > self.stock_price[i]: + self.stock_owned[i] += 1 # buy one share + self.cash_in_hand -= self.stock_price[i] + else: + can_buy = False + + + + + +class DQNAgent(object): + def __init__(self, state_size, action_size): + self.state_size = state_size + self.action_size = action_size + self.gamma = 0.95 # discount rate + self.epsilon = 1.0 # exploration rate + self.epsilon_min = 0.01 + self.epsilon_decay = 0.995 + self.model = LinearModel(state_size, action_size) + + def act(self, state): + if np.random.rand() <= self.epsilon: + return np.random.choice(self.action_size) + act_values = self.model.predict(state) + return np.argmax(act_values[0]) # returns action + + + def train(self, state, action, reward, next_state, done): + if done: + target = reward + else: + target = reward + self.gamma * np.amax(self.model.predict(next_state), axis=1) + + target_full = self.model.predict(state) + target_full[0, action] = target + + # Run one training step + self.model.sgd(state, target_full) + + if self.epsilon > self.epsilon_min: + self.epsilon *= self.epsilon_decay + + + def load(self, name): + self.model.load_weights(name) + + + def save(self, name): + self.model.save_weights(name) + + +def play_one_episode(agent, env, is_train): + # note: after transforming states are already 1xD + state = env.reset() + state = scaler.transform([state]) + done = False + + while not done: + action = agent.act(state) + next_state, reward, done, info = env.step(action) + next_state = scaler.transform([next_state]) + if is_train == 'train': + agent.train(state, action, reward, next_state, done) + state = next_state + + return info['cur_val'] + + + +if __name__ == '__main__': + + # config + models_folder = 'linear_rl_trader_models' + rewards_folder = 'linear_rl_trader_rewards' + num_episodes = 2000 + batch_size = 32 + initial_investment = 20000 + + + parser = argparse.ArgumentParser() + parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') + args = parser.parse_args() + + maybe_make_dir(models_folder) + maybe_make_dir(rewards_folder) + + data = get_data() + n_timesteps, n_stocks = data.shape + + n_train = n_timesteps // 2 + + train_data = data[:n_train] + test_data = data[n_train:] + + env = MultiStockEnv(train_data, initial_investment) + state_size = env.state_dim + action_size = len(env.action_space) + agent = DQNAgent(state_size, action_size) + scaler = get_scaler(env) + + # store the final value of the portfolio (end of episode) + portfolio_value = [] + + if args.mode == 'test': + # then load the previous scaler + with open(f'{models_folder}/scaler.pkl', 'rb') as f: + scaler = 
pickle.load(f) + + # remake the env with test data + env = MultiStockEnv(test_data, initial_investment) + + # make sure epsilon is not 1! + # no need to run multiple episodes if epsilon = 0, it's deterministic + agent.epsilon = 0.01 + + # load trained weights + agent.load(f'{models_folder}/linear.npz') + + # play the game num_episodes times + for e in range(num_episodes): + t0 = datetime.now() + val = play_one_episode(agent, env, args.mode) + dt = datetime.now() - t0 + print(f"episode: {e + 1}/{num_episodes}, episode end value: {val:.2f}, duration: {dt}") + portfolio_value.append(val) # append episode end portfolio value + + # save the weights when we are done + if args.mode == 'train': + # save the DQN + agent.save(f'{models_folder}/linear.npz') + + # save the scaler + with open(f'{models_folder}/scaler.pkl', 'wb') as f: + pickle.dump(scaler, f) + + # plot losses + plt.plot(agent.model.losses) + plt.show() + + + # save portfolio value for each episode + np.save(f'{rewards_folder}/{args.mode}.npy', portfolio_value) diff --git a/rl/monte_carlo.py b/rl/monte_carlo.py index b24dee2f..aedf786f 100644 --- a/rl/monte_carlo.py +++ b/rl/monte_carlo.py @@ -10,13 +10,11 @@ from grid_world import standard_grid, negative_grid from iterative_policy_evaluation import print_values, print_policy -SMALL_ENOUGH = 1e-3 GAMMA = 0.9 -ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') # NOTE: this is only policy evaluation, not optimization -def play_game(grid, policy): +def play_game(grid, policy, max_steps=20): # returns a list of states and corresponding returns # reset game to start at a random position @@ -27,27 +25,34 @@ def play_game(grid, policy): grid.set_state(start_states[start_idx]) s = grid.current_state() - states_and_rewards = [(s, 0)] # list of tuples of (state, reward) + + # keep track of all states and rewards encountered + states = [s] + rewards = [0] + + steps = 0 while not grid.game_over(): a = policy[s] r = grid.move(a) - s = grid.current_state() - states_and_rewards.append((s, r)) - # calculate the returns by working backwards from the terminal state - G = 0 - states_and_returns = [] - first = True - for s, r in reversed(states_and_rewards): - # the value of the terminal state is 0 by definition - # we should ignore the first state we encounter - # and ignore the last G, which is meaningless since it doesn't correspond to any move - if first: - first = False - else: - states_and_returns.append((s, G)) - G = r + GAMMA*G - states_and_returns.reverse() # we want it to be in order of state visited - return states_and_returns + next_s = grid.current_state() + + # update states and rewards lists + states.append(next_s) + rewards.append(r) + + steps += 1 + if steps >= max_steps: + break + + # update state + # note: there is no need to store the final terminal state + s = next_s + + # we want to return: + # states = [s(0), s(1), ..., S(T)] + # rewards = [R(0), R(1), ..., R(T)] + + return states, rewards if __name__ == '__main__': @@ -84,18 +89,20 @@ def play_game(grid, policy): V[s] = 0 # repeat - for t in range(100): - + for _ in range(100): # generate an episode using pi - states_and_returns = play_game(grid, policy) - seen_states = set() - for s, G in states_and_returns: - # check if we have already seen s - # called "first-visit" MC policy evaluation - if s not in seen_states: + states, rewards = play_game(grid, policy) + G = 0 + T = len(states) + for t in range(T - 2, -1, -1): + s = states[t] + r = rewards[t+1] + G = r + GAMMA * G # update return + + # we'll use first-visit Monte Carlo + if s not 
in states[:t]: returns[s].append(G) V[s] = np.mean(returns[s]) - seen_states.add(s) print("values:") print_values(V, grid) diff --git a/rl/monte_carlo_es.py b/rl/monte_carlo_es.py index 79fca9f1..8f5f8573 100644 --- a/rl/monte_carlo_es.py +++ b/rl/monte_carlo_es.py @@ -17,9 +17,8 @@ # NOTE: this script implements the Monte Carlo Exploring-Starts method # for finding the optimal policy -def play_game(grid, policy): - # returns a list of states and corresponding returns +def play_game(grid, policy, max_steps=20): # reset game to start at a random position # we need to do this if we have a deterministic policy # we would never end up at certain states, but we still want to measure their value @@ -31,72 +30,57 @@ def play_game(grid, policy): s = grid.current_state() a = np.random.choice(ALL_POSSIBLE_ACTIONS) # first action is uniformly random - # be aware of the timing - # each triple is s(t), a(t), r(t) - # but r(t) results from taking action a(t-1) from s(t-1) and landing in s(t) - states_actions_rewards = [(s, a, 0)] - seen_states = set() - seen_states.add(grid.current_state()) - num_steps = 0 - while True: + states = [s] + actions = [a] + rewards = [0] + + for _ in range(max_steps): r = grid.move(a) - num_steps += 1 s = grid.current_state() - if s in seen_states: - # hack so that we don't end up in an infinitely long episode - # bumping into the wall repeatedly - # if num_steps == 1 -> bumped into a wall and haven't moved anywhere - # reward = -10 - # else: - # reward = falls off by 1 / num_steps - reward = -10. / num_steps - states_actions_rewards.append((s, None, reward)) - break - elif grid.game_over(): - states_actions_rewards.append((s, None, r)) + rewards.append(r) + states.append(s) + + if grid.game_over(): break else: a = policy[s] - states_actions_rewards.append((s, a, r)) - seen_states.add(s) - - # calculate the returns by working backwards from the terminal state - G = 0 - states_actions_returns = [] - first = True - for s, a, r in reversed(states_actions_rewards): - # the value of the terminal state is 0 by definition - # we should ignore the first state we encounter - # and ignore the last G, which is meaningless since it doesn't correspond to any move - if first: - first = False - else: - states_actions_returns.append((s, a, G)) - G = r + GAMMA*G - states_actions_returns.reverse() # we want it to be in order of state visited - return states_actions_returns + actions.append(a) + + # we want to return: + # states = [s(0), s(1), ..., s(T-1), s(T)] + # actions = [a(0), a(1), ..., a(T-1), ] + # rewards = [ 0, R(1), ..., R(T-1), R(T)] + + return states, actions, rewards def max_dict(d): # returns the argmax (key) and max (value) from a dictionary # put this into a function since we are using it so often - max_key = None - max_val = float('-inf') - for k, v in d.items(): - if v > max_val: - max_val = v - max_key = k - return max_key, max_val + + # find max val + max_val = max(d.values()) + + # find keys corresponding to max val + max_keys = [key for key, val in d.items() if val == max_val] + + ### slow version + # max_keys = [] + # for key, val in d.items(): + # if val == max_val: + # max_keys.append(key) + + return np.random.choice(max_keys), max_val if __name__ == '__main__': # use the standard grid again (0 for every step) so that we can compare # to iterative policy evaluation - # grid = standard_grid() + grid = standard_grid() # try the negative grid too, to see if agent will learn to go past the "bad spot" # in order to minimize number of steps - grid = 
negative_grid(step_cost=-0.9) + # grid = negative_grid(step_cost=-0.1) # print rewards print("rewards:") @@ -110,44 +94,56 @@ def max_dict(d): # initialize Q(s,a) and returns Q = {} - returns = {} # dictionary of state -> list of returns we've received + sample_counts = {} states = grid.all_states() for s in states: if s in grid.actions: # not a terminal state Q[s] = {} + sample_counts[s] = {} for a in ALL_POSSIBLE_ACTIONS: - Q[s][a] = 0 # needs to be initialized to something so we can argmax it - returns[(s,a)] = [] + Q[s][a] = 0 + sample_counts[s][a] = 0 else: # terminal state or state we can't otherwise get to pass # repeat until convergence deltas = [] - for t in range(2000): - if t % 100 == 0: - print(t) + for it in range(10000): + if it % 1000 == 0: + print(it) # generate an episode using pi biggest_change = 0 - states_actions_returns = play_game(grid, policy) - seen_state_action_pairs = set() - for s, a, G in states_actions_returns: - # check if we have already seen s - # called "first-visit" MC policy evaluation - sa = (s, a) - if sa not in seen_state_action_pairs: + states, actions, rewards = play_game(grid, policy) + + # create a list of only state-action pairs for lookup + states_actions = list(zip(states, actions)) + + T = len(states) + G = 0 + for t in range(T - 2, -1, -1): + # retrieve current s, a, r tuple + s = states[t] + a = actions[t] + + # update G + G = rewards[t+1] + GAMMA * G + + # check if we have already seen (s, a) ("first-visit") + if (s, a) not in states_actions[:t]: old_q = Q[s][a] - returns[sa].append(G) - Q[s][a] = np.mean(returns[sa]) + sample_counts[s][a] += 1 + lr = 1 / sample_counts[s][a] + Q[s][a] = old_q + lr * (G - old_q) + + # update policy + policy[s] = max_dict(Q[s])[0] + + # update delta biggest_change = max(biggest_change, np.abs(old_q - Q[s][a])) - seen_state_action_pairs.add(sa) deltas.append(biggest_change) - # update policy - for s in policy.keys(): - policy[s] = max_dict(Q[s])[0] - plt.plot(deltas) plt.show() diff --git a/rl/monte_carlo_no_es.py b/rl/monte_carlo_no_es.py index e079da38..245ccb2f 100644 --- a/rl/monte_carlo_no_es.py +++ b/rl/monte_carlo_no_es.py @@ -7,80 +7,83 @@ import numpy as np +import pandas as pd import matplotlib.pyplot as plt from grid_world import standard_grid, negative_grid from iterative_policy_evaluation import print_values, print_policy -from monte_carlo_es import max_dict GAMMA = 0.9 ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') -# NOTE: find optimal policy and value function -# using on-policy first-visit MC -def random_action(a, eps=0.1): - # choose given a with probability 1 - eps + eps/4 - # choose some other a' != a with probability eps/4 + +def epsilon_greedy(policy, s, eps=0.1): p = np.random.random() - # if p < (1 - eps + eps/len(ALL_POSSIBLE_ACTIONS)): - # return a - # else: - # tmp = list(ALL_POSSIBLE_ACTIONS) - # tmp.remove(a) - # return np.random.choice(tmp) - # - # this is equivalent to the above if p < (1 - eps): - return a + return policy[s] else: return np.random.choice(ALL_POSSIBLE_ACTIONS) -def play_game(grid, policy): - # returns a list of states and corresponding returns - # in this version we will NOT use "exploring starts" method - # instead we will explore using an epsilon-soft policy - s = (2, 0) - grid.set_state(s) - a = random_action(policy[s]) - - # be aware of the timing - # each triple is s(t), a(t), r(t) - # but r(t) results from taking action a(t-1) from s(t-1) and landing in s(t) - states_actions_rewards = [(s, a, 0)] - while True: + +def play_game(grid, policy, max_steps=20): + + # 
start state + s = grid.reset() + + # choose action + a = epsilon_greedy(policy, s) + + states = [s] + actions = [a] + rewards = [0] + + for _ in range(max_steps): r = grid.move(a) s = grid.current_state() + + rewards.append(r) + states.append(s) + if grid.game_over(): - states_actions_rewards.append((s, None, r)) break else: - a = random_action(policy[s]) # the next state is stochastic - states_actions_rewards.append((s, a, r)) - - # calculate the returns by working backwards from the terminal state - G = 0 - states_actions_returns = [] - first = True - for s, a, r in reversed(states_actions_rewards): - # the value of the terminal state is 0 by definition - # we should ignore the first state we encounter - # and ignore the last G, which is meaningless since it doesn't correspond to any move - if first: - first = False - else: - states_actions_returns.append((s, a, G)) - G = r + GAMMA*G - states_actions_returns.reverse() # we want it to be in order of state visited - return states_actions_returns + a = epsilon_greedy(policy, s) + actions.append(a) + + # we want to return: + # states = [s(0), s(1), ..., s(T-1), s(T)] + # actions = [a(0), a(1), ..., a(T-1), ] + # rewards = [ 0, R(1), ..., R(T-1), R(T)] + + return states, actions, rewards + + +def max_dict(d): + # returns the argmax (key) and max (value) from a dictionary + # put this into a function since we are using it so often + + # find max val + max_val = max(d.values()) + + # find keys corresponding to max val + max_keys = [key for key, val in d.items() if val == max_val] + + ### slow version + # max_keys = [] + # for key, val in d.items(): + # if val == max_val: + # max_keys.append(key) + + return np.random.choice(max_keys), max_val if __name__ == '__main__': # use the standard grid again (0 for every step) so that we can compare # to iterative policy evaluation - # grid = standard_grid() + grid = standard_grid() # try the negative grid too, to see if agent will learn to go past the "bad spot" # in order to minimize number of steps - grid = negative_grid(step_cost=-0.1) + # grid = negative_grid(step_cost=-0.1) # print rewards print("rewards:") @@ -94,58 +97,80 @@ def play_game(grid, policy): # initialize Q(s,a) and returns Q = {} - returns = {} # dictionary of state -> list of returns we've received + sample_counts = {} + state_sample_count = {} states = grid.all_states() for s in states: if s in grid.actions: # not a terminal state Q[s] = {} + sample_counts[s] = {} + state_sample_count[s] = 0 for a in ALL_POSSIBLE_ACTIONS: Q[s][a] = 0 - returns[(s,a)] = [] + sample_counts[s][a] = 0 else: # terminal state or state we can't otherwise get to pass # repeat until convergence deltas = [] - for t in range(5000): - if t % 1000 == 0: - print(t) + for it in range(10000): + if it % 1000 == 0: + print(it) # generate an episode using pi biggest_change = 0 - states_actions_returns = play_game(grid, policy) - - # calculate Q(s,a) - seen_state_action_pairs = set() - for s, a, G in states_actions_returns: - # check if we have already seen s - # called "first-visit" MC policy evaluation - sa = (s, a) - if sa not in seen_state_action_pairs: + states, actions, rewards = play_game(grid, policy) + + # create a list of only state-action pairs for lookup + states_actions = list(zip(states, actions)) + + T = len(states) + G = 0 + for t in range(T - 2, -1, -1): + # retrieve current s, a, r tuple + s = states[t] + a = actions[t] + + # update G + G = rewards[t+1] + GAMMA * G + + # check if we have already seen (s, a) ("first-visit") + if (s, a) not in 
states_actions[:t]: old_q = Q[s][a] - returns[sa].append(G) - Q[s][a] = np.mean(returns[sa]) + sample_counts[s][a] += 1 + lr = 1 / sample_counts[s][a] + Q[s][a] = old_q + lr * (G - old_q) + + # update policy + policy[s] = max_dict(Q[s])[0] + + # update state sample count + state_sample_count[s] += 1 + + # update delta biggest_change = max(biggest_change, np.abs(old_q - Q[s][a])) - seen_state_action_pairs.add(sa) deltas.append(biggest_change) - # calculate new policy pi(s) = argmax[a]{ Q(s,a) } - for s in policy.keys(): - a, _ = max_dict(Q[s]) - policy[s] = a - plt.plot(deltas) plt.show() - # find the optimal state-value function - # V(s) = max[a]{ Q(s,a) } + print("final policy:") + print_policy(policy, grid) + + # find V V = {} - for s in policy.keys(): + for s, Qs in Q.items(): V[s] = max_dict(Q[s])[1] print("final values:") print_values(V, grid) - print("final policy:") - print_policy(policy, grid) + print("state_sample_count:") + state_sample_count_arr = np.zeros((grid.rows, grid.cols)) + for i in range(grid.rows): + for j in range(grid.cols): + if (i, j) in state_sample_count: + state_sample_count_arr[i,j] = state_sample_count[(i, j)] + df = pd.DataFrame(state_sample_count_arr) + print(df) diff --git a/rl/monte_carlo_random.py b/rl/monte_carlo_random.py deleted file mode 100644 index 9498b2e1..00000000 --- a/rl/monte_carlo_random.py +++ /dev/null @@ -1,131 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy - -SMALL_ENOUGH = 1e-3 -GAMMA = 0.9 -ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') - -# NOTE: this is only policy evaluation, not optimization - -def random_action(a): - # choose given a with probability 0.5 - # choose some other a' != a with probability 0.5/3 - p = np.random.random() - if p < 0.5: - return a - else: - tmp = list(ALL_POSSIBLE_ACTIONS) - tmp.remove(a) - return np.random.choice(tmp) - -def play_game(grid, policy): - # returns a list of states and corresponding returns - - # reset game to start at a random position - # we need to do this, because given our current deterministic policy - # we would never end up at certain states, but we still want to measure their value - start_states = list(grid.actions.keys()) - start_idx = np.random.choice(len(start_states)) - grid.set_state(start_states[start_idx]) - - s = grid.current_state() - states_and_rewards = [(s, 0)] # list of tuples of (state, reward) - while not grid.game_over(): - a = policy[s] - a = random_action(a) - r = grid.move(a) - s = grid.current_state() - states_and_rewards.append((s, r)) - # calculate the returns by working backwards from the terminal state - G = 0 - states_and_returns = [] - first = True - for s, r in reversed(states_and_rewards): - # the value of the terminal state is 0 by definition - # we should ignore the first state we encounter - # and ignore the last G, which is meaningless since it doesn't correspond to any move - if first: - first = False - else: - states_and_returns.append((s, G)) - G = r + GAMMA*G - states_and_returns.reverse() # we want it to be in order of state visited - return states_and_returns - - -if __name__ == '__main__': - # use the standard 
grid again (0 for every step) so that we can compare - # to iterative policy evaluation - grid = standard_grid() - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # state -> action - # found by policy_iteration_random on standard_grid - # MC method won't get exactly this, but should be close - # values: - # --------------------------- - # 0.43| 0.56| 0.72| 0.00| - # --------------------------- - # 0.33| 0.00| 0.21| 0.00| - # --------------------------- - # 0.25| 0.18| 0.11| -0.17| - # policy: - # --------------------------- - # R | R | R | | - # --------------------------- - # U | | U | | - # --------------------------- - # U | L | U | L | - policy = { - (2, 0): 'U', - (1, 0): 'U', - (0, 0): 'R', - (0, 1): 'R', - (0, 2): 'R', - (1, 2): 'U', - (2, 1): 'L', - (2, 2): 'U', - (2, 3): 'L', - } - - # initialize V(s) and returns - V = {} - returns = {} # dictionary of state -> list of returns we've received - states = grid.all_states() - for s in states: - if s in grid.actions: - returns[s] = [] - else: - # terminal state or state we can't otherwise get to - V[s] = 0 - - # repeat until convergence - for t in range(5000): - - # generate an episode using pi - states_and_returns = play_game(grid, policy) - seen_states = set() - for s, G in states_and_returns: - # check if we have already seen s - # called "first-visit" MC policy evaluation - if s not in seen_states: - returns[s].append(G) - V[s] = np.mean(returns[s]) - seen_states.add(s) - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) diff --git a/rl/optimistic.py b/rl/optimistic.py new file mode 100644 index 00000000..1d024fef --- /dev/null +++ b/rl/optimistic.py @@ -0,0 +1,71 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 5. + self.N = 1. # num samples collected so far + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N += 1. 
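+    # incremental mean update (equivalent to p_estimate += (x - p_estimate) / N):
+    # the running estimate is folded in one sample at a time, so no history of
+    # past rewards needs to be stored. because p_estimate starts at the
+    # optimistic value 5.0, each arm's estimate can only decay toward its true
+    # win rate, which forces every arm to be tried until its estimate falls
+    # below the others.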
+ self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + for i in range(NUM_TRIALS): + # use optimistic initial values to select the next bandit + j = np.argmax([b.p_estimate for b in bandits]) + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.ylim([0, 1]) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() diff --git a/rl/optimistic_starter.py b/rl/optimistic_starter.py new file mode 100644 index 00000000..56b4e5c9 --- /dev/null +++ b/rl/optimistic_starter.py @@ -0,0 +1,71 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = # TODO + self.N = # TODO + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + # TODO + self.p_estimate = # TODO + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + for i in range(NUM_TRIALS): + # use optimistic initial values to select the next bandit + j = # TODO + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.ylim([0, 1]) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() diff --git a/rl/plot_rl_rewards.py b/rl/plot_rl_rewards.py new file mode 100644 index 00000000..ba182c9f --- /dev/null +++ b/rl/plot_rl_rewards.py @@ -0,0 +1,22 @@ +import matplotlib.pyplot as plt +import numpy as np +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') +args = parser.parse_args() + +a = 
np.load(f'linear_rl_trader_rewards/{args.mode}.npy') + +print(f"average reward: {a.mean():.2f}, min: {a.min():.2f}, max: {a.max():.2f}") + +if args.mode == 'train': + # show the training progress + plt.plot(a) +else: + # test - show a histogram of rewards + plt.hist(a, bins=20) + +plt.title(args.mode) +plt.show() \ No newline at end of file diff --git a/rl/policy_iteration.py b/rl/policy_iteration.py deleted file mode 100644 index 4709038d..00000000 --- a/rl/policy_iteration.py +++ /dev/null @@ -1,95 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy - -SMALL_ENOUGH = 1e-3 -GAMMA = 0.9 -ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') - -# this is deterministic -# all p(s',r|s,a) = 1 or 0 - -if __name__ == '__main__': - # this grid gives you a reward of -0.1 for every non-terminal state - # we want to see if this will encourage finding a shorter path to the goal - grid = negative_grid() - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # state -> action - # we'll randomly choose an action and update as we learn - policy = {} - for s in grid.actions.keys(): - policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS) - - # initial policy - print("initial policy:") - print_policy(policy, grid) - - # initialize V(s) - V = {} - states = grid.all_states() - for s in states: - # V[s] = 0 - if s in grid.actions: - V[s] = np.random.random() - else: - # terminal state - V[s] = 0 - - # repeat until convergence - will break out when policy does not change - while True: - - # policy evaluation step - we already know how to do this! 
- while True: - biggest_change = 0 - for s in states: - old_v = V[s] - - # V(s) only has value if it's not a terminal state - if s in policy: - a = policy[s] - grid.set_state(s) - r = grid.move(a) - V[s] = r + GAMMA * V[grid.current_state()] - biggest_change = max(biggest_change, np.abs(old_v - V[s])) - - if biggest_change < SMALL_ENOUGH: - break - - # policy improvement step - is_policy_converged = True - for s in states: - if s in policy: - old_a = policy[s] - new_a = None - best_value = float('-inf') - # loop through all possible actions to find the best current action - for a in ALL_POSSIBLE_ACTIONS: - grid.set_state(s) - r = grid.move(a) - v = r + GAMMA * V[grid.current_state()] - if v > best_value: - best_value = v - new_a = a - policy[s] = new_a - if new_a != old_a: - is_policy_converged = False - - if is_policy_converged: - break - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) diff --git a/rl/policy_iteration_deterministic.py b/rl/policy_iteration_deterministic.py new file mode 100644 index 00000000..e18e75bf --- /dev/null +++ b/rl/policy_iteration_deterministic.py @@ -0,0 +1,140 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +from grid_world import standard_grid, ACTION_SPACE +from iterative_policy_evaluation_deterministic import print_values, print_policy + +SMALL_ENOUGH = 1e-3 +GAMMA = 0.9 + + +# copied from iterative_policy_evaluation +def get_transition_probs_and_rewards(grid): + ### define transition probabilities and grid ### + # the key is (s, a, s'), the value is the probability + # that is, transition_probs[(s, a, s')] = p(s' | s, a) + # any key NOT present will considered to be impossible (i.e. 
probability 0) + transition_probs = {} + + # to reduce the dimensionality of the dictionary, we'll use deterministic + # rewards, r(s, a, s') + # note: you could make it simpler by using r(s') since the reward doesn't + # actually depend on (s, a) + rewards = {} + + for i in range(grid.rows): + for j in range(grid.cols): + s = (i, j) + if not grid.is_terminal(s): + for a in ACTION_SPACE: + s2 = grid.get_next_state(s, a) + transition_probs[(s, a, s2)] = 1 + if s2 in grid.rewards: + rewards[(s, a, s2)] = grid.rewards[s2] + + return transition_probs, rewards + + +def evaluate_deterministic_policy(grid, policy, initV=None): + # initialize V(s) = 0 + if initV is None: + V = {} + for s in grid.all_states(): + V[s] = 0 + else: + # it's faster to use the existing V(s) since the value won't change + # that much from one policy to the next + V = initV + + # repeat until convergence + it = 0 + while True: + biggest_change = 0 + for s in grid.all_states(): + if not grid.is_terminal(s): + old_v = V[s] + new_v = 0 # we will accumulate the answer + for a in ACTION_SPACE: + for s2 in grid.all_states(): + + # action probability is deterministic + action_prob = 1 if policy.get(s) == a else 0 + + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + new_v += action_prob * transition_probs.get((s, a, s2), 0) * (r + GAMMA * V[s2]) + + # after done getting the new value, update the value table + V[s] = new_v + biggest_change = max(biggest_change, np.abs(old_v - V[s])) + it += 1 + + if biggest_change < SMALL_ENOUGH: + break + return V + + +if __name__ == '__main__': + + grid = standard_grid() + transition_probs, rewards = get_transition_probs_and_rewards(grid) + + # print rewards + print("rewards:") + print_values(grid.rewards, grid) + + # state -> action + # we'll randomly choose an action and update as we learn + policy = {} + for s in grid.actions.keys(): + policy[s] = np.random.choice(ACTION_SPACE) + + # initial policy + print("initial policy:") + print_policy(policy, grid) + + # repeat until convergence - will break out when policy does not change + V = None + while True: + + # policy evaluation step - we already know how to do this! 
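+    # each pass of this outer loop is one round of policy iteration:
+    #   1) policy evaluation: compute V(s) for the current fixed policy
+    #   2) policy improvement: make the policy greedy with respect to V(s)
+    # we stop once the greedy policy stops changing, since a policy that is
+    # greedy with respect to its own value function is optimal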
+ V = evaluate_deterministic_policy(grid, policy, initV=V) + + # policy improvement step + is_policy_converged = True + for s in grid.actions.keys(): + old_a = policy[s] + new_a = None + best_value = float('-inf') + + # loop through all possible actions to find the best current action + for a in ACTION_SPACE: + v = 0 + for s2 in grid.all_states(): + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + v += transition_probs.get((s, a, s2), 0) * (r + GAMMA * V[s2]) + + if v > best_value: + best_value = v + new_a = a + + # new_a now represents the best action in this state + policy[s] = new_a + if new_a != old_a: + is_policy_converged = False + + if is_policy_converged: + break + + # once we're done, print the final policy and values + print("values:") + print_values(V, grid) + print("policy:") + print_policy(policy, grid) diff --git a/rl/policy_iteration_probabilistic.py b/rl/policy_iteration_probabilistic.py new file mode 100644 index 00000000..0468886c --- /dev/null +++ b/rl/policy_iteration_probabilistic.py @@ -0,0 +1,136 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +from grid_world import windy_grid, windy_grid_penalized, ACTION_SPACE +from iterative_policy_evaluation import print_values, print_policy + +SMALL_ENOUGH = 1e-3 +GAMMA = 0.9 + + +# copied from iterative_policy_evaluation +def get_transition_probs_and_rewards(grid): + ### define transition probabilities and grid ### + # the key is (s, a, s'), the value is the probability + # that is, transition_probs[(s, a, s')] = p(s' | s, a) + # any key NOT present will considered to be impossible (i.e. 
probability 0) + transition_probs = {} + + # to reduce the dimensionality of the dictionary, we'll use deterministic + # rewards, r(s, a, s') + # note: you could make it simpler by using r(s') since the reward doesn't + # actually depend on (s, a) + rewards = {} + + for (s, a), v in grid.probs.items(): + for s2, p in v.items(): + transition_probs[(s, a, s2)] = p + rewards[(s, a, s2)] = grid.rewards.get(s2, 0) + + return transition_probs, rewards + + +def evaluate_deterministic_policy(grid, policy, initV=None): + # initialize V(s) = 0 + if initV is None: + V = {} + for s in grid.all_states(): + V[s] = 0 + else: + # it's faster to use the existing V(s) since the value won't change + # that much from one policy to the next + V = initV + + # repeat until convergence + it = 0 + while True: + biggest_change = 0 + for s in grid.all_states(): + if not grid.is_terminal(s): + old_v = V[s] + new_v = 0 # we will accumulate the answer + for a in ACTION_SPACE: + for s2 in grid.all_states(): + + # action probability is deterministic + action_prob = 1 if policy.get(s) == a else 0 + + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + new_v += action_prob * transition_probs.get((s, a, s2), 0) * (r + GAMMA * V[s2]) + + # after done getting the new value, update the value table + V[s] = new_v + biggest_change = max(biggest_change, np.abs(old_v - V[s])) + it += 1 + + if biggest_change < SMALL_ENOUGH: + break + return V + + +if __name__ == '__main__': + + grid = windy_grid_penalized(-0.1) + # grid = windy_grid() + transition_probs, rewards = get_transition_probs_and_rewards(grid) + + # print rewards + print("rewards:") + print_values(grid.rewards, grid) + + # state -> action + # we'll randomly choose an action and update as we learn + policy = {} + for s in grid.actions.keys(): + policy[s] = np.random.choice(ACTION_SPACE) + + # initial policy + print("initial policy:") + print_policy(policy, grid) + + # repeat until convergence - will break out when policy does not change + V = None + while True: + + # policy evaluation step - we already know how to do this! 
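+    # same alternation of evaluation and improvement as the deterministic
+    # version; the difference is that the windy grid has stochastic
+    # transitions, so both steps sum over all next states s2 weighted by
+    # p(s2 | s, a) from transition_probs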
+ V = evaluate_deterministic_policy(grid, policy, initV=V) + + # policy improvement step + is_policy_converged = True + for s in grid.actions.keys(): + old_a = policy[s] + new_a = None + best_value = float('-inf') + + # loop through all possible actions to find the best current action + for a in ACTION_SPACE: + v = 0 + for s2 in grid.all_states(): + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + v += transition_probs.get((s, a, s2), 0) * (r + GAMMA * V[s2]) + + if v > best_value: + best_value = v + new_a = a + + # new_a now represents the best action in this state + policy[s] = new_a + if new_a != old_a: + is_policy_converged = False + + if is_policy_converged: + break + + # once we're done, print the final policy and values + print("values:") + print_values(V, grid) + print("policy:") + print_policy(policy, grid) diff --git a/rl/policy_iteration_random.py b/rl/policy_iteration_random.py deleted file mode 100644 index 54faf29e..00000000 --- a/rl/policy_iteration_random.py +++ /dev/null @@ -1,111 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy - -SMALL_ENOUGH = 1e-3 -GAMMA = 0.9 -ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') - -# next state and reward will now have some randomness -# you'll go in your desired direction with probability 0.5 -# you'll go in a random direction a' != a with probability 0.5/3 - -if __name__ == '__main__': - # this grid gives you a reward of -0.1 for every non-terminal state - # we want to see if this will encourage finding a shorter path to the goal - grid = negative_grid(step_cost=-1.0) - # grid = negative_grid(step_cost=-0.1) - # grid = standard_grid() - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # state -> action - # we'll randomly choose an action and update as we learn - policy = {} - for s in grid.actions.keys(): - policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS) - - # initial policy - print("initial policy:") - print_policy(policy, grid) - - # initialize V(s) - V = {} - states = grid.all_states() - for s in states: - # V[s] = 0 - if s in grid.actions: - V[s] = np.random.random() - else: - # terminal state - V[s] = 0 - - # repeat until convergence - will break out when policy does not change - while True: - - # policy evaluation step - we already know how to do this! 
- while True: - biggest_change = 0 - for s in states: - old_v = V[s] - - # V(s) only has value if it's not a terminal state - new_v = 0 - if s in policy: - for a in ALL_POSSIBLE_ACTIONS: - if a == policy[s]: - p = 0.5 - else: - p = 0.5/3 - grid.set_state(s) - r = grid.move(a) - new_v += p*(r + GAMMA * V[grid.current_state()]) - V[s] = new_v - biggest_change = max(biggest_change, np.abs(old_v - V[s])) - - if biggest_change < SMALL_ENOUGH: - break - - # policy improvement step - is_policy_converged = True - for s in states: - if s in policy: - old_a = policy[s] - new_a = None - best_value = float('-inf') - # loop through all possible actions to find the best current action - for a in ALL_POSSIBLE_ACTIONS: # chosen action - v = 0 - for a2 in ALL_POSSIBLE_ACTIONS: # resulting action - if a == a2: - p = 0.5 - else: - p = 0.5/3 - grid.set_state(s) - r = grid.move(a2) - v += p*(r + GAMMA * V[grid.current_state()]) - if v > best_value: - best_value = v - new_a = a - policy[s] = new_a - if new_a != old_a: - is_policy_converged = False - - if is_policy_converged: - break - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) - # result: every move is as bad as losing, so lose as quickly as possible diff --git a/rl/q_learning.py b/rl/q_learning.py index ace032d6..d4c3c145 100644 --- a/rl/q_learning.py +++ b/rl/q_learning.py @@ -11,29 +11,21 @@ from grid_world import standard_grid, negative_grid from iterative_policy_evaluation import print_values, print_policy from monte_carlo_es import max_dict -from td0_prediction import random_action GAMMA = 0.9 ALPHA = 0.1 ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') +def epsilon_greedy(Q, s, eps=0.1): + if np.random.random() < eps: + return np.random.choice(ALL_POSSIBLE_ACTIONS) + else: + a_opt = max_dict(Q[s])[0] + return a_opt + + if __name__ == '__main__': - # NOTE: if we use the standard grid, there's a good chance we will end up with - # suboptimal policies - # e.g. - # --------------------------- - # R | R | R | | - # --------------------------- - # R* | | U | | - # --------------------------- - # U | R | U | L | - # since going R at (1,0) (shown with a *) incurs no cost, it's OK to keep doing that. - # we'll either end up staying in the same spot, or back to the start (2,0), at which - # point we whould then just go back up, or at (0,0), at which point we can continue - # on right. - # instead, let's penalize each movement so the agent will find a shorter route. 
- # # grid = standard_grid() grid = negative_grid(step_cost=-0.1) @@ -41,8 +33,6 @@ print("rewards:") print_values(grid.rewards, grid) - # no policy initialization, we will derive our policy from most recent Q - # initialize Q(s,a) Q = {} states = grid.all_states() @@ -53,63 +43,40 @@ # let's also keep track of how many times Q[s] has been updated update_counts = {} - update_counts_sa = {} - for s in states: - update_counts_sa[s] = {} - for a in ALL_POSSIBLE_ACTIONS: - update_counts_sa[s][a] = 1.0 # repeat until convergence - t = 1.0 - deltas = [] + reward_per_episode = [] for it in range(10000): - if it % 100 == 0: - t += 1e-2 if it % 2000 == 0: print("it:", it) - # instead of 'generating' an epsiode, we will PLAY - # an episode within this loop - s = (2, 0) # start state - grid.set_state(s) - - # the first (s, r) tuple is the state we start in and 0 - # (since we don't get a reward) for simply starting the game - # the last (s, r) tuple is the terminal state and the final reward - # the value for the terminal state is by definition 0, so we don't - # care about updating it. - a, _ = max_dict(Q[s]) - biggest_change = 0 + # begin a new episode + s = grid.reset() + episode_reward = 0 while not grid.game_over(): - a = random_action(a, eps=0.5/t) # epsilon-greedy - # random action also works, but slower since you can bump into walls - # a = np.random.choice(ALL_POSSIBLE_ACTIONS) + # perform action and get next state + reward + a = epsilon_greedy(Q, s, eps=0.1) r = grid.move(a) s2 = grid.current_state() - # adaptive learning rate - alpha = ALPHA / update_counts_sa[s][a] - update_counts_sa[s][a] += 0.005 + # update reward + episode_reward += r - # we will update Q(s,a) AS we experience the episode - old_qsa = Q[s][a] - # the difference between SARSA and Q-Learning is with Q-Learning - # we will use this max[a']{ Q(s',a')} in our update - # even if we do not end up taking this action in the next step - a2, max_q_s2a2 = max_dict(Q[s2]) - Q[s][a] = Q[s][a] + alpha*(r + GAMMA*max_q_s2a2 - Q[s][a]) - biggest_change = max(biggest_change, np.abs(old_qsa - Q[s][a])) + # update Q(s,a) + maxQ = max_dict(Q[s2])[1] + Q[s][a] = Q[s][a] + ALPHA*(r + GAMMA*maxQ - Q[s][a]) # we would like to know how often Q(s) has been updated too update_counts[s] = update_counts.get(s,0) + 1 # next state becomes current state s = s2 - a = a2 - - deltas.append(biggest_change) - plt.plot(deltas) + # log the reward for this episode + reward_per_episode.append(episode_reward) + + plt.plot(reward_per_episode) + plt.title("reward_per_episode") plt.show() # determine the policy from Q* diff --git a/rl/sarsa.py b/rl/sarsa.py index 0f16e179..5e1b9d5a 100644 --- a/rl/sarsa.py +++ b/rl/sarsa.py @@ -11,29 +11,21 @@ from grid_world import standard_grid, negative_grid from iterative_policy_evaluation import print_values, print_policy from monte_carlo_es import max_dict -from td0_prediction import random_action GAMMA = 0.9 ALPHA = 0.1 ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') +def epsilon_greedy(Q, s, eps=0.1): + if np.random.random() < eps: + return np.random.choice(ALL_POSSIBLE_ACTIONS) + else: + a_opt = max_dict(Q[s])[0] + return a_opt + + if __name__ == '__main__': - # NOTE: if we use the standard grid, there's a good chance we will end up with - # suboptimal policies - # e.g. - # --------------------------- - # R | R | R | | - # --------------------------- - # R* | | U | | - # --------------------------- - # U | R | U | L | - # since going R at (1,0) (shown with a *) incurs no cost, it's OK to keep doing that. 
- # we'll either end up staying in the same spot, or back to the start (2,0), at which - # point we whould then just go back up, or at (0,0), at which point we can continue - # on right. - # instead, let's penalize each movement so the agent will find a shorter route. - # # grid = standard_grid() grid = negative_grid(step_cost=-0.1) @@ -41,8 +33,6 @@ print("rewards:") print_values(grid.rewards, grid) - # no policy initialization, we will derive our policy from most recent Q - # initialize Q(s,a) Q = {} states = grid.all_states() @@ -53,49 +43,30 @@ # let's also keep track of how many times Q[s] has been updated update_counts = {} - update_counts_sa = {} - for s in states: - update_counts_sa[s] = {} - for a in ALL_POSSIBLE_ACTIONS: - update_counts_sa[s][a] = 1.0 # repeat until convergence - t = 1.0 - deltas = [] + reward_per_episode = [] for it in range(10000): - if it % 100 == 0: - t += 1e-2 if it % 2000 == 0: print("it:", it) - # instead of 'generating' an epsiode, we will PLAY - # an episode within this loop - s = (2, 0) # start state - grid.set_state(s) - - # the first (s, r) tuple is the state we start in and 0 - # (since we don't get a reward) for simply starting the game - # the last (s, r) tuple is the terminal state and the final reward - # the value for the terminal state is by definition 0, so we don't - # care about updating it. - a = max_dict(Q[s])[0] - a = random_action(a, eps=0.5/t) - biggest_change = 0 + # begin a new episode + s = grid.reset() + a = epsilon_greedy(Q, s, eps=0.1) + episode_reward = 0 while not grid.game_over(): + # perform action and get next state + reward r = grid.move(a) s2 = grid.current_state() - # we need the next action as well since Q(s,a) depends on Q(s',a') - # if s2 not in policy then it's a terminal state, all Q are 0 - a2 = max_dict(Q[s2])[0] - a2 = random_action(a2, eps=0.5/t) # epsilon-greedy + # update reward + episode_reward += r + + # get next action + a2 = epsilon_greedy(Q, s2, eps=0.1) - # we will update Q(s,a) AS we experience the episode - alpha = ALPHA / update_counts_sa[s][a] - update_counts_sa[s][a] += 0.005 - old_qsa = Q[s][a] - Q[s][a] = Q[s][a] + alpha*(r + GAMMA*Q[s2][a2] - Q[s][a]) - biggest_change = max(biggest_change, np.abs(old_qsa - Q[s][a])) + # update Q(s,a) + Q[s][a] = Q[s][a] + ALPHA*(r + GAMMA*Q[s2][a2] - Q[s][a]) # we would like to know how often Q(s) has been updated too update_counts[s] = update_counts.get(s,0) + 1 @@ -104,9 +75,11 @@ s = s2 a = a2 - deltas.append(biggest_change) + # log the reward for this episode + reward_per_episode.append(episode_reward) - plt.plot(deltas) + plt.plot(reward_per_episode) + plt.title("reward_per_episode") plt.show() # determine the policy from Q* diff --git a/rl/td0_prediction.py b/rl/td0_prediction.py old mode 100644 new mode 100755 index 08b9b239..98101eff --- a/rl/td0_prediction.py +++ b/rl/td0_prediction.py @@ -16,31 +16,16 @@ ALPHA = 0.1 ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') -# NOTE: this is only policy evaluation, not optimization -def random_action(a, eps=0.1): +def epsilon_greedy(policy, s, eps=0.1): # we'll use epsilon-soft to ensure all states are visited # what happens if you don't do this? i.e. 
eps=0 p = np.random.random() if p < (1 - eps): - return a + return policy[s] else: return np.random.choice(ALL_POSSIBLE_ACTIONS) -def play_game(grid, policy): - # returns a list of states and corresponding rewards (not returns as in MC) - # start at the designated start state - s = (2, 0) - grid.set_state(s) - states_and_rewards = [(s, 0)] # list of tuples of (state, reward) - while not grid.game_over(): - a = policy[s] - a = random_action(a) - r = grid.move(a) - s = grid.current_state() - states_and_rewards.append((s, r)) - return states_and_rewards - if __name__ == '__main__': # use the standard grid again (0 for every step) so that we can compare @@ -70,21 +55,35 @@ def play_game(grid, policy): for s in states: V[s] = 0 + # store max change in V(s) per episode + deltas = [] + # repeat until convergence - for it in range(1000): - - # generate an episode using pi - states_and_rewards = play_game(grid, policy) - # the first (s, r) tuple is the state we start in and 0 - # (since we don't get a reward) for simply starting the game - # the last (s, r) tuple is the terminal state and the final reward - # the value for the terminal state is by definition 0, so we don't - # care about updating it. - for t in range(len(states_and_rewards) - 1): - s, _ = states_and_rewards[t] - s2, r = states_and_rewards[t+1] - # we will update V(s) AS we experience the episode - V[s] = V[s] + ALPHA*(r + GAMMA*V[s2] - V[s]) + n_episodes = 10000 + for it in range(n_episodes): + # begin a new episode + s = grid.reset() + + delta = 0 + while not grid.game_over(): + a = epsilon_greedy(policy, s) + + r = grid.move(a) + s_next = grid.current_state() + + # update V(s) + v_old = V[s] + V[s] = V[s] + ALPHA*(r + GAMMA*V[s_next] - V[s]) + delta = max(delta, np.abs(V[s] - v_old)) + + # next state becomes current state + s = s_next + + # store delta + deltas.append(delta) + + plt.plot(deltas) + plt.show() print("values:") print_values(V, grid) diff --git a/rl/ucb1.py b/rl/ucb1.py index 48480fe1..5779b654 100644 --- a/rl/ucb1.py +++ b/rl/ucb1.py @@ -8,74 +8,74 @@ import numpy as np import matplotlib.pyplot as plt -from comparing_epsilons import run_experiment as run_experiment_eps + + +NUM_TRIALS = 100000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] class Bandit: - def __init__(self, m): - self.m = m - self.mean = 0 - self.N = 0 + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 0. + self.N = 0. # num samples collected so far def pull(self): - return np.random.randn() + self.m + # draw a 1 with probability p + return np.random.random() < self.p def update(self, x): - self.N += 1 - self.mean = (1 - 1.0/self.N)*self.mean + 1.0/self.N*x + self.N += 1. 
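+    # running mean of rewards observed for this arm; ucb() then adds the
+    # exploration bonus sqrt(2 * ln(n) / N_j) on top of this estimate, so
+    # arms pulled only a few times (small N_j) still get selected occasionally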
+ self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N def ucb(mean, n, nj): - if nj == 0: - return float('inf') return mean + np.sqrt(2*np.log(n) / nj) -def run_experiment(m1, m2, m3, N): - bandits = [Bandit(m1), Bandit(m2), Bandit(m3)] +def run_experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + rewards = np.empty(NUM_TRIALS) + total_plays = 0 - data = np.empty(N) + # initialization: play each bandit once + for j in range(len(bandits)): + x = bandits[j].pull() + total_plays += 1 + bandits[j].update(x) - for i in range(N): - j = np.argmax([ucb(b.mean, i+1, b.N) for b in bandits]) + for i in range(NUM_TRIALS): + j = np.argmax([ucb(b.p_estimate, total_plays, b.N) for b in bandits]) x = bandits[j].pull() + total_plays += 1 bandits[j].update(x) # for the plot - data[i] = x - cumulative_average = np.cumsum(data) / (np.arange(N) + 1) - - # for b in bandits: - # print("bandit nj:", b.N) + rewards[i] = x + cumulative_average = np.cumsum(rewards) / (np.arange(NUM_TRIALS) + 1) # plot moving average ctr plt.plot(cumulative_average) - plt.plot(np.ones(N)*m1) - plt.plot(np.ones(N)*m2) - plt.plot(np.ones(N)*m3) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) plt.xscale('log') plt.show() - # for b in bandits: - # print(b.mean) - - return cumulative_average + # plot moving average ctr linear + plt.plot(cumulative_average) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() -if __name__ == '__main__': - eps = run_experiment_eps(1.0, 2.0, 3.0, 0.1, 100000) - ucb = run_experiment(1.0, 2.0, 3.0, 100000) + for b in bandits: + print(b.p_estimate) - # log scale plot - plt.plot(eps, label='eps = 0.1') - plt.plot(ucb, label='ucb1') - plt.legend() - plt.xscale('log') - plt.show() + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + return cumulative_average - # linear plot - plt.plot(eps, label='eps = 0.1') - plt.plot(ucb, label='ucb1') - plt.legend() - plt.show() +if __name__ == '__main__': + run_experiment() diff --git a/rl/ucb1_starter.py b/rl/ucb1_starter.py new file mode 100644 index 00000000..9e9c3106 --- /dev/null +++ b/rl/ucb1_starter.py @@ -0,0 +1,81 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +# https://books.google.ca/books?id=_ATpBwAAQBAJ&lpg=PA201&ots=rinZM8jQ6s&dq=hoeffding%20bound%20gives%20probability%20%22greater%20than%201%22&pg=PA201#v=onepage&q&f=false +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + + +NUM_TRIALS = 100000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 0. + self.N = 0. # num samples collected so far + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N += 1. 
+ self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N + + +def ucb(mean, n, nj): + return # TODO + + +def run_experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + rewards = np.empty(NUM_TRIALS) + total_plays = 0 + + # initialization: play each bandit once + for j in range(len(bandits)): + x = bandits[j].pull() + total_plays += 1 + bandits[j].update(x) + + for i in range(NUM_TRIALS): + j = # TODO + x = bandits[j].pull() + total_plays += 1 + bandits[j].update(x) + + # for the plot + rewards[i] = x + cumulative_average = np.cumsum(rewards) / (np.arange(NUM_TRIALS) + 1) + + # plot moving average ctr + plt.plot(cumulative_average) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.xscale('log') + plt.show() + + # plot moving average ctr linear + plt.plot(cumulative_average) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + + for b in bandits: + print(b.p_estimate) + + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + return cumulative_average + +if __name__ == '__main__': + run_experiment() + diff --git a/rl/value_iteration.py b/rl/value_iteration.py index 6367ec6f..8f6738d2 100644 --- a/rl/value_iteration.py +++ b/rl/value_iteration.py @@ -7,77 +7,89 @@ import numpy as np -from grid_world import standard_grid, negative_grid +from grid_world import windy_grid, ACTION_SPACE from iterative_policy_evaluation import print_values, print_policy SMALL_ENOUGH = 1e-3 GAMMA = 0.9 -ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') -# this is deterministic -# all p(s',r|s,a) = 1 or 0 +# copied from iterative_policy_evaluation +def get_transition_probs_and_rewards(grid): + ### define transition probabilities and grid ### + # the key is (s, a, s'), the value is the probability + # that is, transition_probs[(s, a, s')] = p(s' | s, a) + # any key NOT present will considered to be impossible (i.e. 
probability 0) + transition_probs = {} + + # to reduce the dimensionality of the dictionary, we'll use deterministic + # rewards, r(s, a, s') + # note: you could make it simpler by using r(s') since the reward doesn't + # actually depend on (s, a) + rewards = {} + + for (s, a), v in grid.probs.items(): + for s2, p in v.items(): + transition_probs[(s, a, s2)] = p + rewards[(s, a, s2)] = grid.rewards.get(s2, 0) + + return transition_probs, rewards if __name__ == '__main__': - # this grid gives you a reward of -0.1 for every non-terminal state - # we want to see if this will encourage finding a shorter path to the goal - grid = negative_grid() + grid = windy_grid() + transition_probs, rewards = get_transition_probs_and_rewards(grid) # print rewards print("rewards:") print_values(grid.rewards, grid) - # state -> action - # we'll randomly choose an action and update as we learn - policy = {} - for s in grid.actions.keys(): - policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS) - - # initial policy - print("initial policy:") - print_policy(policy, grid) - # initialize V(s) V = {} states = grid.all_states() for s in states: - # V[s] = 0 - if s in grid.actions: - V[s] = np.random.random() - else: - # terminal state - V[s] = 0 + V[s] = 0 # repeat until convergence # V[s] = max[a]{ sum[s',r] { p(s',r|s,a)[r + gamma*V[s']] } } + it = 0 while True: biggest_change = 0 - for s in states: - old_v = V[s] - - # V(s) only has value if it's not a terminal state - if s in policy: + for s in grid.all_states(): + if not grid.is_terminal(s): + old_v = V[s] new_v = float('-inf') - for a in ALL_POSSIBLE_ACTIONS: - grid.set_state(s) - r = grid.move(a) - v = r + GAMMA * V[grid.current_state()] + + for a in ACTION_SPACE: + v = 0 + for s2 in grid.all_states(): + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + v += transition_probs.get((s, a, s2), 0) * (r + GAMMA * V[s2]) + + # keep v if it's better if v > new_v: new_v = v + V[s] = new_v biggest_change = max(biggest_change, np.abs(old_v - V[s])) + it += 1 if biggest_change < SMALL_ENOUGH: break # find a policy that leads to optimal value function - for s in policy.keys(): + policy = {} + for s in grid.actions.keys(): best_a = None best_value = float('-inf') # loop through all possible actions to find the best current action - for a in ALL_POSSIBLE_ACTIONS: - grid.set_state(s) - r = grid.move(a) - v = r + GAMMA * V[grid.current_state()] + for a in ACTION_SPACE: + v = 0 + for s2 in grid.all_states(): + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + v += transition_probs.get((s, a, s2), 0) * (r + GAMMA * V[s2]) + + # best_a is the action associated with best_value if v > best_value: best_value = v best_a = a diff --git a/rl2/a3c/main.py b/rl2/a3c/main.py index 0e7f88bf..a902ff03 100644 --- a/rl2/a3c/main.py +++ b/rl2/a3c/main.py @@ -13,6 +13,14 @@ from worker import Worker +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + + ENV_NAME = "Breakout-v0" MAX_GLOBAL_STEPS = 5e6 STEPS_PER_UPDATE = 5 diff --git a/rl2/atari/dqn_tf.py b/rl2/atari/dqn_tf.py old mode 100644 new mode 100755 index 34c1ab16..e37394d1 --- a/rl2/atari/dqn_tf.py +++ b/rl2/atari/dqn_tf.py @@ -19,7 +19,12 @@ +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") +if 
tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") ##### testing only # MAX_EXPERIENCES = 10000 @@ -141,7 +146,11 @@ def get_minibatch(self): self.states[i] = self._get_state(idx - 1) self.new_states[i] = self._get_state(idx) - return np.transpose(self.states, axes=(0, 2, 3, 1)), self.actions[self.indices], self.rewards[self.indices], np.transpose(self.new_states, axes=(0, 2, 3, 1)), self.terminal_flags[self.indices] + return np.transpose(self.states, axes=(0, 2, 3, 1)), \ + self.actions[self.indices], \ + self.rewards[self.indices], \ + np.transpose(self.new_states, axes=(0, 2, 3, 1)), \ + self.terminal_flags[self.indices] class DQN: diff --git a/rl2/atari/dqn_theano.py b/rl2/atari/dqn_theano.py old mode 100644 new mode 100755 index e0114b59..0ad3b36a --- a/rl2/atari/dqn_theano.py +++ b/rl2/atari/dqn_theano.py @@ -140,7 +140,11 @@ def get_minibatch(self): self.states[i] = self._get_state(idx - 1) self.new_states[i] = self._get_state(idx) - return self.states, self.actions[self.indices], self.rewards[self.indices], self.new_states, self.terminal_flags[self.indices] + return self.states, \ + self.actions[self.indices], \ + self.rewards[self.indices], \ + self.new_states, \ + self.terminal_flags[self.indices] def init_filter(shape): diff --git a/rl2/cartpole/dqn_tf.py b/rl2/cartpole/dqn_tf.py index b6c812b1..133772df 100644 --- a/rl2/cartpole/dqn_tf.py +++ b/rl2/cartpole/dqn_tf.py @@ -15,6 +15,17 @@ from datetime import datetime from q_learning_bins import plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + + +# global counter +global_iters = 0 + # a version of HiddenLayer that keeps track of params class HiddenLayer: @@ -154,6 +165,7 @@ def sample_action(self, x, eps): def play_one(env, model, tmodel, eps, gamma, copy_period): + global global_iters observation = env.reset() done = False totalreward = 0 @@ -174,8 +186,9 @@ def play_one(env, model, tmodel, eps, gamma, copy_period): model.train(tmodel) iters += 1 + global_iters += 1 - if iters % copy_period == 0: + if global_iters % copy_period == 0: tmodel.copy_from(model) return totalreward diff --git a/rl2/cartpole/dqn_theano.py b/rl2/cartpole/dqn_theano.py index ebf7c36e..18e6844c 100644 --- a/rl2/cartpole/dqn_theano.py +++ b/rl2/cartpole/dqn_theano.py @@ -16,6 +16,14 @@ from datetime import datetime from q_learning_bins import plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + + +# global counter +global_iters = 0 + # helper for adam optimizer # use tensorflow defaults @@ -170,6 +178,7 @@ def sample_action(self, x, eps): def play_one(env, model, tmodel, eps, gamma, copy_period): + global global_iters observation = env.reset() done = False totalreward = 0 @@ -190,8 +199,9 @@ def play_one(env, model, tmodel, eps, gamma, copy_period): model.train(tmodel) iters += 1 + global_iters += 1 - if iters % copy_period == 0: + if global_iters % copy_period == 0: tmodel.copy_from(model) return totalreward diff --git a/rl2/cartpole/pg_tf.py b/rl2/cartpole/pg_tf.py index 5c271494..40122df0 100644 --- a/rl2/cartpole/pg_tf.py +++ b/rl2/cartpole/pg_tf.py @@ -16,6 +16,13 @@ from datetime import datetime from q_learning_bins import plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please 
install OpenAI Gym 0.19.0 or earlier") + +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + # so you can test different architectures class HiddenLayer: @@ -169,8 +176,8 @@ def play_one_td(env, pmodel, vmodel, gamma): # reward = -200 # update the models - V_next = vmodel.predict(observation) - G = reward + gamma*np.max(V_next) + V_next = vmodel.predict(observation)[0] + G = reward + gamma*V_next advantage = G - vmodel.predict(prev_observation) pmodel.partial_fit(prev_observation, action, advantage) vmodel.partial_fit(prev_observation, G) diff --git a/rl2/cartpole/pg_theano.py b/rl2/cartpole/pg_theano.py index 3164dd37..16979d5f 100644 --- a/rl2/cartpole/pg_theano.py +++ b/rl2/cartpole/pg_theano.py @@ -17,6 +17,10 @@ from datetime import datetime from q_learning_bins import plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + # so you can test different architectures class HiddenLayer: @@ -169,34 +173,34 @@ def predict(self, X): return self.predict_op(X) -# def play_one_td(env, pmodel, vmodel, gamma): -# observation = env.reset() -# done = False -# totalreward = 0 -# iters = 0 +def play_one_td(env, pmodel, vmodel, gamma): + observation = env.reset() + done = False + totalreward = 0 + iters = 0 -# while not done and iters < 2000: -# # if we reach 2000, just quit, don't want this going forever -# # the 200 limit seems a bit early -# action = pmodel.sample_action(observation) -# prev_observation = observation -# observation, reward, done, info = env.step(action) + while not done and iters < 2000: + # if we reach 2000, just quit, don't want this going forever + # the 200 limit seems a bit early + action = pmodel.sample_action(observation) + prev_observation = observation + observation, reward, done, info = env.step(action) -# if done: -# reward = -200 + if done: + reward = -200 -# # update the models -# V_next = vmodel.predict(observation) -# G = reward + gamma*np.max(V_next) -# advantage = G - vmodel.predict(prev_observation) -# pmodel.partial_fit(prev_observation, action, advantage) -# vmodel.partial_fit(prev_observation, G) + # update the models + V_next = vmodel.predict(observation) + G = reward + gamma*np.max(V_next) + advantage = G - vmodel.predict(prev_observation) + pmodel.partial_fit(prev_observation, action, advantage) + vmodel.partial_fit(prev_observation, G) -# if reward == 1: # if we changed the reward to -200 -# totalreward += reward -# iters += 1 + if reward == 1: # if we changed the reward to -200 + totalreward += reward + iters += 1 -# return totalreward + return totalreward def play_one_mc(env, pmodel, vmodel, gamma): diff --git a/rl2/cartpole/q_learning.py b/rl2/cartpole/q_learning.py index d02fbc05..3e7cc4a5 100644 --- a/rl2/cartpole/q_learning.py +++ b/rl2/cartpole/q_learning.py @@ -20,6 +20,10 @@ from sklearn.kernel_approximation import RBFSampler from q_learning_bins import plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + class SGDRegressor: def __init__(self, D): diff --git a/rl2/cartpole/q_learning_bins.py b/rl2/cartpole/q_learning_bins.py index 3d3ed041..198ceb2a 100644 --- a/rl2/cartpole/q_learning_bins.py +++ b/rl2/cartpole/q_learning_bins.py @@ -15,6 +15,10 @@ from gym import wrappers from datetime import datetime +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI 
Gym 0.19.0 or earlier") + # turns list of integers into an int # Ex. diff --git a/rl2/cartpole/random_search.py b/rl2/cartpole/random_search.py index 77ea36d6..985bcfda 100644 --- a/rl2/cartpole/random_search.py +++ b/rl2/cartpole/random_search.py @@ -9,6 +9,10 @@ import numpy as np import matplotlib.pyplot as plt +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + def get_action(s, w): return 1 if s.dot(w) > 0 else 0 diff --git a/rl2/cartpole/save_a_video.py b/rl2/cartpole/save_a_video.py index 31690c29..e7128fc9 100644 --- a/rl2/cartpole/save_a_video.py +++ b/rl2/cartpole/save_a_video.py @@ -10,6 +10,11 @@ import numpy as np import matplotlib.pyplot as plt +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + + def get_action(s, w): return 1 if s.dot(w) > 0 else 0 @@ -63,5 +68,5 @@ def random_search(env): plt.show() # play a final set of episodes - env = wrappers.Monitor(env, 'my_awesome_dir') + env = wrappers.RecordVideo(env, 'my_awesome_dir') print("***Final run with final weights***:", play_one_episode(env, params)) diff --git a/rl2/cartpole/td_lambda.py b/rl2/cartpole/td_lambda.py index ff19f627..ba9883bc 100644 --- a/rl2/cartpole/td_lambda.py +++ b/rl2/cartpole/td_lambda.py @@ -15,6 +15,11 @@ from q_learning import FeatureTransformer from q_learning_bins import plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + + class SGDRegressor: def __init__(self, D): diff --git a/rl2/cartpole/tf_warmup.py b/rl2/cartpole/tf_warmup.py index 877cd54a..1cc2efee 100644 --- a/rl2/cartpole/tf_warmup.py +++ b/rl2/cartpole/tf_warmup.py @@ -7,6 +7,9 @@ import tensorflow as tf import q_learning +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + class SGDRegressor: def __init__(self, D): diff --git a/rl2/gym_tutorial.py b/rl2/gym_tutorial.py index 7a2d7dbb..ace01452 100644 --- a/rl2/gym_tutorial.py +++ b/rl2/gym_tutorial.py @@ -6,6 +6,11 @@ # Environment page: # https://gym.openai.com/envs/CartPole-v0 +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + + # get the environment env = gym.make('CartPole-v0') diff --git a/rl2/mountaincar/n_step.py b/rl2/mountaincar/n_step.py index 5ef967ff..628fdbcf 100644 --- a/rl2/mountaincar/n_step.py +++ b/rl2/mountaincar/n_step.py @@ -54,7 +54,7 @@ def predict(self, X): # returns a list of states_and_rewards, and the total reward def play_one(model, eps, gamma, n=5): - observation = env.reset() + observation = env.reset()[0] done = False totalreward = 0 rewards = [] @@ -73,7 +73,7 @@ def play_one(model, eps, gamma, n=5): actions.append(action) prev_observation = observation - observation, reward, done, info = env.step(action) + observation, reward, done, truncated, info = env.step(action) rewards.append(reward) @@ -81,7 +81,10 @@ def play_one(model, eps, gamma, n=5): if len(rewards) >= n: # return_up_to_prediction = calculate_return_before_prediction(rewards, gamma) return_up_to_prediction = multiplier.dot(rewards[-n:]) - G = return_up_to_prediction + (gamma**n)*np.max(model.predict(observation)[0]) + action_values = model.predict(observation)[0] + # print("action_values.shape:", action_values.shape) + G = return_up_to_prediction + (gamma**n)*np.max(action_values) + # print("G:", 
G) model.update(states[-n], actions[-n], G) # if len(rewards) > n: diff --git a/rl2/mountaincar/pg_tf.py b/rl2/mountaincar/pg_tf.py old mode 100644 new mode 100755 index b8c8ef59..fe04b416 --- a/rl2/mountaincar/pg_tf.py +++ b/rl2/mountaincar/pg_tf.py @@ -15,6 +15,13 @@ from datetime import datetime from q_learning import plot_running_avg, FeatureTransformer, plot_cost_to_go +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + # so you can test different architectures class HiddenLayer: @@ -177,8 +184,12 @@ def play_one_td(env, pmodel, vmodel, gamma): totalreward += reward # update the models - V_next = vmodel.predict(observation) - G = reward + gamma*V_next + if done: + G = reward + else: + V_next = vmodel.predict(observation) + G = reward + gamma*V_next + advantage = G - vmodel.predict(prev_observation) pmodel.partial_fit(prev_observation, action, advantage) vmodel.partial_fit(prev_observation, G) diff --git a/rl2/mountaincar/pg_tf_random.py b/rl2/mountaincar/pg_tf_random.py index bb0d2a11..e46b7b25 100644 --- a/rl2/mountaincar/pg_tf_random.py +++ b/rl2/mountaincar/pg_tf_random.py @@ -15,6 +15,13 @@ from datetime import datetime from q_learning import plot_running_avg, FeatureTransformer +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + # so you can test different architectures class HiddenLayer: diff --git a/rl2/mountaincar/pg_theano.py b/rl2/mountaincar/pg_theano.py old mode 100644 new mode 100755 index cf1c8f01..669fc416 --- a/rl2/mountaincar/pg_theano.py +++ b/rl2/mountaincar/pg_theano.py @@ -208,7 +208,7 @@ def predict(self, X): return self.predict_op(X) -def play_one_td(env, pmodel, vmodel, gamma, train=True): +def play_one_td(env, pmodel, vmodel, gamma): observation = env.reset() done = False totalreward = 0 @@ -224,12 +224,15 @@ def play_one_td(env, pmodel, vmodel, gamma, train=True): totalreward += reward # update the models - if train: + if done: + G = reward + else: V_next = vmodel.predict(observation) G = reward + gamma*V_next - advantage = G - vmodel.predict(prev_observation) - pmodel.partial_fit(prev_observation, action, advantage) - vmodel.partial_fit(prev_observation, G) + + advantage = G - vmodel.predict(prev_observation) + pmodel.partial_fit(prev_observation, action, advantage) + vmodel.partial_fit(prev_observation, G) iters += 1 diff --git a/rl2/mountaincar/pg_theano_random.py b/rl2/mountaincar/pg_theano_random.py index 9ac07b16..c95c5971 100644 --- a/rl2/mountaincar/pg_theano_random.py +++ b/rl2/mountaincar/pg_theano_random.py @@ -16,6 +16,10 @@ from datetime import datetime from q_learning import plot_running_avg, FeatureTransformer +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + # so you can test different architectures diff --git a/rl2/mountaincar/q_learning.py b/rl2/mountaincar/q_learning.py old mode 100644 new mode 100755 index 1d4be4f2..129d67e0 --- a/rl2/mountaincar/q_learning.py +++ b/rl2/mountaincar/q_learning.py @@ -70,7 +70,7 @@ def __init__(self, env, feature_transformer, learning_rate): self.feature_transformer = feature_transformer for i in range(env.action_space.n): model = SGDRegressor(learning_rate=learning_rate) - 
model.partial_fit(feature_transformer.transform( [env.reset()] ), [0]) + model.partial_fit(feature_transformer.transform( [env.reset()[0]] ), [0]) self.models.append(model) def predict(self, s): @@ -99,19 +99,23 @@ def sample_action(self, s, eps): # returns a list of states_and_rewards, and the total reward def play_one(model, env, eps, gamma): - observation = env.reset() + observation = env.reset()[0] done = False totalreward = 0 iters = 0 while not done and iters < 10000: action = model.sample_action(observation, eps) prev_observation = observation - observation, reward, done, info = env.step(action) + observation, reward, done, truncated, info = env.step(action) # update the model - next = model.predict(observation) - # assert(next.shape == (1, env.action_space.n)) - G = reward + gamma*np.max(next[0]) + if done: + G = reward + else: + Qnext = model.predict(observation) + # assert(next.shape == (1, env.action_space.n)) + G = reward + gamma*np.max(Qnext[0]) + model.update(prev_observation, action, G) totalreward += reward @@ -165,14 +169,14 @@ def main(show_plots=True): N = 300 totalrewards = np.empty(N) for n in range(N): - # eps = 1.0/(0.1*n+1) - eps = 0.1*(0.97**n) + eps = 1.0/(0.1*n+1) + # eps = 0.1*(0.97**n) if n == 199: print("eps:", eps) # eps = 1.0/np.sqrt(n+1) totalreward = play_one(model, env, eps, gamma) totalrewards[n] = totalreward - if (n + 1) % 100 == 0: + if (n + 1) % 10 == 0: print("episode:", n, "total reward:", totalreward) print("avg reward for last 100 episodes:", totalrewards[-100:].mean()) print("total steps:", -totalrewards.sum()) diff --git a/rl2/mountaincar/td_lambda.py b/rl2/mountaincar/td_lambda.py old mode 100644 new mode 100755 index 4d4f292d..3d7dd8ac --- a/rl2/mountaincar/td_lambda.py +++ b/rl2/mountaincar/td_lambda.py @@ -23,6 +23,10 @@ # code we already wrote from q_learning import plot_cost_to_go, FeatureTransformer, plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + class BaseModel: def __init__(self, D): @@ -83,9 +87,9 @@ def play_one(model, env, eps, gamma, lambda_): observation, reward, done, info = env.step(action) # update the model - next = model.predict(observation) - assert(next.shape == (1, env.action_space.n)) - G = reward + gamma*np.max(next[0]) + Qnext = model.predict(observation) + assert(Qnext.shape == (1, env.action_space.n)) + G = reward + gamma*np.max(Qnext[0]) model.update(prev_observation, action, G, gamma, lambda_) totalreward += reward diff --git a/rl2v2/extra_reading.txt b/rl2v2/extra_reading.txt new file mode 100644 index 00000000..b1b113f2 --- /dev/null +++ b/rl2v2/extra_reading.txt @@ -0,0 +1,8 @@ +Gymnasium Library +https://gymnasium.farama.org/ + +Stable Baselines 3 +https://github.com/DLR-RM/stable-baselines3 + +Reinforcement Learning Prerequisites +https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python \ No newline at end of file diff --git a/rl3/a2c/a2c.py b/rl3/a2c/a2c.py index 3b7d3268..ce1667b1 100644 --- a/rl3/a2c/a2c.py +++ b/rl3/a2c/a2c.py @@ -5,6 +5,9 @@ import tensorflow as tf import os +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + def set_global_seeds(i): tf.set_random_seed(i) diff --git a/rl3/a2c/main.py b/rl3/a2c/main.py index 3bf85105..b42c86d9 100644 --- a/rl3/a2c/main.py +++ b/rl3/a2c/main.py @@ -11,6 +11,10 @@ import argparse import logging +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: 
+ exit("Please install OpenAI Gym 0.19.0 or earlier") + os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Mute missing instructions errors MODEL_PATH = 'models' diff --git a/rl3/ddpg.py b/rl3/ddpg.py index 3eb80d1c..3913cedd 100644 --- a/rl3/ddpg.py +++ b/rl3/ddpg.py @@ -5,6 +5,13 @@ import matplotlib.pyplot as plt from datetime import datetime +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + ### avoid crashing on Mac # doesn't seem to work diff --git a/rl3/es_mujoco.py b/rl3/es_mujoco.py index ce43f983..3ef4ffd9 100644 --- a/rl3/es_mujoco.py +++ b/rl3/es_mujoco.py @@ -10,6 +10,11 @@ import gym import sys +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + + # environment ENV_NAME = 'HalfCheetah-v2' diff --git a/rl3/gym_review.py b/rl3/gym_review.py index 26733a58..3be2ac98 100644 --- a/rl3/gym_review.py +++ b/rl3/gym_review.py @@ -3,6 +3,11 @@ import numpy as np import matplotlib.pyplot as plt +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + + def get_action(s, w): return 1 if s.dot(w) > 0 else 0 diff --git a/rl3v2/extra_reading.txt b/rl3v2/extra_reading.txt new file mode 100644 index 00000000..cdff9892 --- /dev/null +++ b/rl3v2/extra_reading.txt @@ -0,0 +1,23 @@ +=== PART 1 === + +ES (Evolution Strategies) +"Evolution Strategies as a Scalable Alternative to Reinforcement Learning" +https://arxiv.org/abs/1703.03864 + +Trust Region Evolution Strategies +https://www.microsoft.com/en-us/research/uploads/prod/2018/11/trust-region-evolution-strategies.pdf + +The CMA Evolution Strategy: A Tutorial +https://arxiv.org/pdf/1604.00772 + +Simple random search provides a competitive approach to reinforcement learning (Augmented Random Search) +https://arxiv.org/abs/1803.07055 + +=== PART 2 === + +DDPG (Deep Deterministic Policy Gradient) +"Continuous control with deep reinforcement learning" +https://arxiv.org/abs/1509.02971 + +Deterministic Policy Gradient Algorithms +http://proceedings.mlr.press/v32/silver14.pdf \ No newline at end of file diff --git a/rl3v2/visualize_es.py b/rl3v2/visualize_es.py new file mode 100644 index 00000000..e518c388 --- /dev/null +++ b/rl3v2/visualize_es.py @@ -0,0 +1,59 @@ +import numpy as np +import matplotlib.pyplot as plt + +# Objective function to minimize (you can change this) +def f(x, y): + # return np.sin(x) + np.cos(y) + return -((x - 1)**2 + y**2) + +# Evolution Strategies optimizer (simple version) +def evolution_strategies( + f, bounds, pop_size=50, sigma=0.3, alpha=0.03, iterations=100 +): + dim = 2 + mu = np.random.uniform(bounds[0], bounds[1], size=dim) + + history = [] + + for gen in range(iterations): + # Sample noise + noise = np.random.randn(pop_size, dim) + population = mu + sigma * noise + fitness = np.array([f(x[0], x[1]) for x in population]) + + history.append((population.copy(), mu.copy())) + + # Normalize fitness for weighting + fitness_norm = (fitness - np.mean(fitness)) / (np.std(fitness) + 1e-8) + mu += alpha / (pop_size * sigma) * np.dot(noise.T, fitness_norm) + + return history + +# Visualization function +def visualize_es(history, bounds, f, resolution=100): + x = np.linspace(bounds[0], bounds[1], resolution) + y = np.linspace(bounds[0], bounds[1], resolution) + X, Y = np.meshgrid(x, y) + Z = f(X, Y) + 
+ plt.figure(figsize=(8, 6)) + for i, (pop, mu) in enumerate(history): + plt.clf() + plt.contourf(X, Y, Z, levels=50, cmap='viridis') + plt.colorbar(label="f(x, y)") + plt.scatter(pop[:, 0], pop[:, 1], c='white', s=20, label='Population') + plt.scatter(mu[0], mu[1], c='red', s=80, label='Mean', edgecolors='black') + plt.title(f"Evolution Strategies - Step {i+1}") + plt.xlim(bounds[0], bounds[1]) + plt.ylim(bounds[0], bounds[1]) + plt.xlabel('x') + plt.ylabel('y') + plt.legend() + # plt.pause(0.1) + plt.waitforbuttonpress() + plt.show() + +# Run +bounds = (-5, 5) +history = evolution_strategies(f, bounds, iterations=80) +visualize_es(history, bounds, f) diff --git a/rl3v2/visualize_hill_climbing.py b/rl3v2/visualize_hill_climbing.py new file mode 100644 index 00000000..d640a20e --- /dev/null +++ b/rl3v2/visualize_hill_climbing.py @@ -0,0 +1,61 @@ +import numpy as np +import matplotlib.pyplot as plt + +# Objective function to minimize (you can change this) +def f(x, y): + # return np.sin(x) + np.cos(y) + return -((x - 1)**2 + y**2) + +# Hill climbing optimizer (simple version) +def hill_climb( + f, bounds, pop_size=1, sigma=0.3, alpha=0.3, iterations=100 +): + dim = 2 + mu = np.random.uniform(bounds[0], bounds[1], size=dim) + + history = [] + best_f = f(mu[0], mu[1]) + + for gen in range(iterations): + # Sample noise + noise = np.random.randn(pop_size, dim) + population = mu + sigma * noise + fitness = np.array([f(x[0], x[1]) for x in population]) + + history.append((population.copy(), mu.copy())) + + # Update point if it's better + if fitness[0] > best_f: + best_f = fitness[0] + mu = population.flatten() + + return history + +# Visualization function +def visualize_es(history, bounds, f, resolution=100): + x = np.linspace(bounds[0], bounds[1], resolution) + y = np.linspace(bounds[0], bounds[1], resolution) + X, Y = np.meshgrid(x, y) + Z = f(X, Y) + + plt.figure(figsize=(8, 6)) + for i, (pop, mu) in enumerate(history): + plt.clf() + plt.contourf(X, Y, Z, levels=50, cmap='viridis') + plt.colorbar(label="f(x, y)") + plt.scatter(pop[:, 0], pop[:, 1], c='white', s=20, label='Population') + plt.scatter(mu[0], mu[1], c='red', s=80, label='Mean', edgecolors='black') + plt.title(f"Hill Climbing - Step {i+1}") + plt.xlim(bounds[0], bounds[1]) + plt.ylim(bounds[0], bounds[1]) + plt.xlabel('x') + plt.ylabel('y') + plt.legend() + # plt.pause(0.1) + plt.waitforbuttonpress() + plt.show() + +# Run +bounds = (-5, 5) +history = hill_climb(f, bounds, iterations=80) +visualize_es(history, bounds, f) diff --git a/rnn_class/WHERE ARE THE NOTEBOOKS.txt b/rnn_class/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..5446ce25 --- /dev/null +++ b/rnn_class/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,5 @@ +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. + +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com.
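The two scripts above, visualize_es.py and visualize_hill_climbing.py, differ only in their search rule: evolution strategies moves the mean along a fitness-weighted average of the sampled noise, while hill climbing keeps a single perturbed candidate only when it improves on the best value seen so far. The standalone sketch below strips away the plotting to show just those two update rules; it reuses the toy objective and hyperparameter values from the scripts above, but it is illustrative only and not part of the repository.

import numpy as np

def f(x, y):
    # same toy objective as the scripts above; maximized at (1, 0)
    return -((x - 1)**2 + y**2)

def es_step(mu, pop_size=50, sigma=0.3, alpha=0.03):
    # ES: nudge the mean along a fitness-weighted average of the noise
    noise = np.random.randn(pop_size, 2)
    population = mu + sigma * noise
    fitness = np.array([f(x, y) for x, y in population])
    fitness_norm = (fitness - fitness.mean()) / (fitness.std() + 1e-8)
    return mu + alpha / (pop_size * sigma) * noise.T.dot(fitness_norm)

def hill_climb_step(mu, best_f, sigma=0.3):
    # hill climbing: keep one perturbed candidate only if it is better
    candidate = mu + sigma * np.random.randn(2)
    cand_f = f(candidate[0], candidate[1])
    return (candidate, cand_f) if cand_f > best_f else (mu, best_f)

if __name__ == '__main__':
    np.random.seed(0)
    mu_es = np.zeros(2)
    mu_hc, best_f = np.zeros(2), f(0.0, 0.0)
    for _ in range(200):
        mu_es = es_step(mu_es)
        mu_hc, best_f = hill_climb_step(mu_hc, best_f)
    print("ES mean:", mu_es)              # both should drift toward (1, 0)
    print("hill climbing point:", mu_hc)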
\ No newline at end of file diff --git a/rnn_class/exercises.txt b/rnn_class/exercises.txt new file mode 100644 index 00000000..612eea3e --- /dev/null +++ b/rnn_class/exercises.txt @@ -0,0 +1,19 @@ +Logistic Regression +https://www.kaggle.com/uciml/pima-indians-diabetes-database +https://lazyprogrammer.me/course_files/exercises/diabetes.csv + +Linear Regression +https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html +https://lazyprogrammer.me/course_files/exercises/boston.txt + +ANN +https://archive.ics.uci.edu/ml/datasets/ecoli (orig) +https://www.kaggle.com/elikplim/ecoli-data-set (alt) +https://lazyprogrammer.me/course_files/exercises/ecoli.csv + +RNN +Find your own stock price dataset! + +NLP +https://www.kaggle.com/crowdflower/twitter-airline-sentiment +https://lazyprogrammer.me/course_files/exercises/AirlineSentimentTweets.csv \ No newline at end of file diff --git a/stats/extra_reading.txt b/stats/extra_reading.txt new file mode 100644 index 00000000..9dc9b858 --- /dev/null +++ b/stats/extra_reading.txt @@ -0,0 +1,2 @@ +The Unbiased Estimate of the Covariance Matrix +https://lazyprogrammer.me/covariance-matrix-divide-by-n-or-n-1/ \ No newline at end of file diff --git a/supervised_class/dt.py b/supervised_class/dt.py index f76299d3..b31933ff 100644 --- a/supervised_class/dt.py +++ b/supervised_class/dt.py @@ -25,10 +25,12 @@ def entropy(y): class TreeNode: - def __init__(self, depth=0, max_depth=None): - # print 'depth:', depth + def __init__(self, depth=1, max_depth=None): + print('depth:', depth) self.depth = depth self.max_depth = max_depth + if self.max_depth is not None and self.max_depth < self.depth: + raise Exception("depth > max_depth") def fit(self, X, Y): if len(Y) == 1 or len(set(Y)) == 1: diff --git a/supervised_class2/extra_reading.txt b/supervised_class2/extra_reading.txt index f826cb04..79711307 100644 --- a/supervised_class2/extra_reading.txt +++ b/supervised_class2/extra_reading.txt @@ -8,4 +8,7 @@ Explaining AdaBoost http://rob.schapire.net/papers/explaining-adaboost.pdf Improved Boosting Algorithms Using Confidence-rated Predictions -https://sci2s.ugr.es/keel/pdf/algorithm/articulo/1999-ML-Improved%20boosting%20algorithms%20using%20confidence-rated%20predictions%20(Schapire%20y%20Singer).pdf \ No newline at end of file +https://sci2s.ugr.es/keel/pdf/algorithm/articulo/1999-ML-Improved%20boosting%20algorithms%20using%20confidence-rated%20predictions%20(Schapire%20y%20Singer).pdf + +Why does the bootstrap work? 
+http://www.stat.cmu.edu/~larry/=sml/Boot.pdf \ No newline at end of file diff --git a/supervised_class2/rf_regression.py b/supervised_class2/rf_regression.py index 2b219a34..ae31cef4 100644 --- a/supervised_class2/rf_regression.py +++ b/supervised_class2/rf_regression.py @@ -1,6 +1,7 @@ # https://deeplearningcourses.com/c/machine-learning-in-python-random-forest-adaboost # https://www.udemy.com/machine-learning-in-python-random-forest-adaboost # uses house dataset from https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ +# Alternate data source: https://archive.org/download/housing_202405/housing.data # put all files in the folder ../large_files from __future__ import print_function, division from future.utils import iteritems @@ -66,8 +67,7 @@ def fit_transform(self, df): def get_data(): - # regex allows arbitrary number of spaces in separator - df = pd.read_csv('../large_files/housing.data', header=None, sep=r"\s*", engine='python') + df = pd.read_csv('housing.data', header=None, delim_whitespace=True) df.columns = [ 'crim', # numerical 'zn', # numerical @@ -128,9 +128,9 @@ def get_data(): # do a quick baseline test baseline = LinearRegression() single_tree = DecisionTreeRegressor() - print("CV single tree:", cross_val_score(single_tree, Xtrain, Ytrain).mean()) - print("CV baseline:", cross_val_score(baseline, Xtrain, Ytrain).mean()) - print("CV forest:", cross_val_score(model, Xtrain, Ytrain).mean()) + print("CV single tree:", cross_val_score(single_tree, Xtrain, Ytrain, cv=5).mean()) + print("CV baseline:", cross_val_score(baseline, Xtrain, Ytrain, cv=5).mean()) + print("CV forest:", cross_val_score(model, Xtrain, Ytrain, cv=5).mean()) # test score single_tree.fit(Xtrain, Ytrain) diff --git a/tf2.0/.gitignore b/tf2.0/.gitignore new file mode 100644 index 00000000..5d414047 --- /dev/null +++ b/tf2.0/.gitignore @@ -0,0 +1,3 @@ +rl_trader_working*.py +*rl_trader_models +*rl_trader_rewards diff --git a/tf2.0/WHERE ARE THE NOTEBOOKS.txt b/tf2.0/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..5446ce25 --- /dev/null +++ b/tf2.0/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,5 @@ +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. + +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. 
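The rf_regression.py hunk above replaces the regex separator with delim_whitespace=True, reads housing.data from the current directory, and makes the cross-validation fold count explicit (cv=5). A minimal sketch of that load-and-score pattern, assuming housing.data is in the working directory and pandas/scikit-learn are installed (it skips the feature transformations the full script applies), could look like this:

import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score

# housing.data is whitespace-delimited and has no header row
df = pd.read_csv('housing.data', header=None, delim_whitespace=True)
X = df.iloc[:, :-1].values  # first 13 columns are the features
y = df.iloc[:, -1].values   # last column (medv, median home value) is the target

model = RandomForestRegressor(n_estimators=100)
# cv=5 makes the number of folds explicit, matching the change above
print("CV forest:", cross_val_score(model, X, y, cv=5).mean())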
\ No newline at end of file diff --git a/tf2.0/aapl_msi_sbux.csv b/tf2.0/aapl_msi_sbux.csv new file mode 100644 index 00000000..cb98cb88 --- /dev/null +++ b/tf2.0/aapl_msi_sbux.csv @@ -0,0 +1,1260 @@ +AAPL,MSI,SBUX +67.8542,60.3,28.185 +68.5614,60.9,28.07 +66.8428,60.83,28.13 +66.7156,60.81,27.915 +66.6556,61.12,27.775 +65.7371,61.43,27.17 +65.7128,62.03,27.225 +64.1214,61.26,26.655 +63.7228,60.88,26.675 +64.4014,61.9,27.085 +63.2571,60.28,26.605 +64.1385,60.63,26.64 +63.5099,62.09,27.285 +63.0571,62.21,27.425 +61.4957,62.03,27.435 +60.0071,62.5,27.85 +61.5919,62.97,28.255 +60.8088,63.11,28.55 +61.5117,62.64,29.125 +61.6742,62.75,29.335 +62.5528,62.56,29.305 +61.2042,62.13,29.14 +61.1928,62.22,29.2925 +61.7857,62.34,28.84 +63.3799,62.07,28.83 +65.1028,61.64,28.465 +64.9271,61.67,28.415 +64.5828,62.4,28.715 +64.6756,62.43,28.525 +65.9871,63.61,28.69 +66.2256,63.29,28.345 +65.8765,63.46,28.525 +64.5828,63.56,28.455 +63.2371,64.03,28.475 +61.2728,63.7,28.435 +61.3988,63.7,29.13 +61.7128,62.8,28.85 +61.1028,62.99,29.055 +60.4571,62.67,28.9 +60.8871,63.17,29.06 +60.9971,63.64,28.705 +62.2414,64.69,28.9 +62.0471,64.63,29.2875 +61.3999,63.87,29.545 +59.9785,61.83,28.855 +60.8914,62.96,29.28 +57.5428,62.13,29.085 +56.0071,61.15,28.86 +55.7899,61.72,29.2025 +56.9528,61.78,29.32 +58.0185,61.75,29.695 +57.9231,56.02,29.915 +58.3399,56.39,30.25 +59.6007,56.8,30.0 +61.4457,57.44,30.29 +63.2542,57.2,30.42 +62.7557,56.37,30.07 +63.6457,56.89,30.19 +64.2828,57.29,30.935 +65.8156,56.95,31.24 +65.5225,56.79,31.095 +66.2628,57.0,31.205 +65.2528,56.78,31.18 +64.7099,56.48,31.5485 +64.9628,56.17,31.41 +63.4085,56.89,31.76 +61.2642,57.1,32.035 +62.0825,57.53,31.775 +61.8942,57.84,32.065 +63.2757,58.25,31.915 +62.8085,57.77,32.125 +63.0505,57.3,32.075 +63.1628,57.48,31.76 +63.5928,57.81,31.68 +63.0627,58.53,32.13 +63.5642,58.32,31.815 +64.5114,58.54,31.735 +64.2478,57.96,31.57 +64.3885,57.83,31.73 +64.1871,57.41,31.665 +63.5871,56.27,31.17 +62.6371,56.92,31.51 +63.1158,56.94,32.52 +62.6985,56.61,33.055 +62.5142,56.38,32.71 +61.7414,56.26,32.225 +62.2807,57.19,32.985 +61.4357,56.93,32.8 +61.7142,57.33,33.015 +61.6814,57.35,33.5475 +60.4285,56.78,33.205 +59.5482,55.5,32.61 +59.0714,55.82,32.345 +57.5057,55.59,32.005 +57.5185,56.35,32.37 +56.8671,57.49,32.9 +56.2542,57.84,32.845 +56.6471,57.73,32.755 +58.4599,57.98,33.12 +59.7842,57.49,33.395 +60.1142,57.26,33.65 +59.6314,57.93,33.86 +59.2928,57.86,34.145 +60.3357,58.03,34.065 +60.1042,58.43,34.05 +61.0411,59.05,34.67 +60.9299,59.54,34.86 +61.0628,59.17,34.83 +61.4564,59.32,34.76 +61.4728,59.42,34.1 +61.6797,59.36,34.24 +60.7071,59.85,34.395 +60.9014,59.87,34.51 +59.8557,59.98,33.83 +62.9299,56.04,33.305 +62.6428,54.25,34.085 +62.9985,54.26,36.68 +63.9699,54.01,36.225 +64.7599,54.35,35.965 +64.6471,54.83,35.6445 +65.2394,55.32,36.74 +66.0771,56.02,37.115 +67.0642,56.1,36.985 +66.4642,56.4,36.4 +66.4256,56.48,36.095 +65.8585,57.13,36.47 +64.9214,57.36,36.4 +66.7656,57.44,36.465 +69.9385,57.84,36.32 +71.2142,57.71,35.925 +71.1299,56.96,35.37 +71.7614,57.15,35.355 +72.5342,57.09,35.145 +71.5814,57.05,35.33 +71.7656,56.06,35.3565 +71.8514,56.33,35.95 +71.5742,56.74,35.985 +71.8528,56.55,35.94 +69.7985,56.12,35.08 +70.1279,56.39,35.48 +70.2428,56.19,35.59 +69.6022,56.01,35.26 +69.7971,56.28,35.8 +71.2415,56.08,36.07 +70.7528,56.17,36.025 +71.1742,56.47,35.785 +72.3099,57.59,36.22 +70.6628,57.37,37.1075 +66.8156,57.25,37.695 +67.5271,57.5,37.835 +66.4142,57.46,37.785 +64.3028,57.81,37.62 +65.0456,58.28,38.02 +66.3828,59.26,38.665 +67.4714,59.69,38.175 
+66.7728,60.39,38.06 +70.0914,60.37,37.68 +69.8714,59.99,38.275 +68.7899,59.85,38.17 +69.4599,59.87,38.59 +68.9642,59.75,38.665 +68.1071,59.38,38.485 +69.7085,60.89,38.58 +69.9371,60.7,38.595 +69.0585,60.56,38.435 +69.0042,61.14,38.7 +69.6785,60.89,38.4305 +68.7056,59.62,37.765 +69.5125,59.39,37.63 +69.9482,60.61,38.56 +70.4016,60.52,38.91 +70.8628,61.03,39.05 +71.2399,60.49,38.355 +71.5876,60.71,39.02 +72.0714,60.92,39.3675 +72.6985,60.81,39.655 +74.4802,61.18,39.73 +74.2667,60.43,40.45 +74.9942,62.4,40.025 +75.9871,62.51,39.525 +75.1368,62.99,39.98 +75.6965,62.44,39.355 +73.8111,62.73,39.81 +74.9851,62.25,40.415 +74.6716,62.52,40.525 +74.2899,62.39,40.185 +75.2499,62.71,40.185 +75.0641,62.68,40.995 +74.4171,62.65,40.565 +73.2131,62.49,39.535 +74.3656,63.12,40.6 +74.1496,63.51,40.495 +74.2871,64.24,40.3075 +74.3762,64.45,40.7305 +75.4514,64.58,40.57 +74.9986,65.57,40.595 +74.0898,65.42,40.27 +74.2214,64.61,39.96 +73.5714,64.58,39.845 +74.4479,65.41,40.765 +74.2571,65.88,40.675 +74.8199,65.79,40.355 +76.1999,65.57,40.755 +77.9942,65.5,40.81 +79.4385,65.88,40.73 +78.7471,65.66,40.535 +80.9031,65.79,40.275 +80.7142,64.93,39.75 +81.1286,65.23,39.86 +80.0028,66.18,39.97 +80.9185,65.79,39.865 +80.7928,65.41,38.69 +80.1942,64.6,38.2 +80.0771,64.86,38.24 +79.2042,65.05,38.175 +79.6428,65.36,38.23 +79.2842,65.52,38.045 +78.6813,66.16,38.84 +77.7799,65.85,38.575 +78.4314,65.61,38.83 +81.4413,66.78,39.16 +81.0956,67.1,39.285 +80.5571,67.18,39.44 +80.0128,67.33,39.285 +79.2171,67.25,39.275 +80.1456,67.5,39.195 +79.0185,66.33,38.585 +77.2828,66.2,38.475 +77.7042,65.92,38.085 +77.1481,66.19,38.605 +77.6371,65.99,39.015 +76.6455,66.5,38.8 +76.1342,66.15,38.835 +76.5328,65.49,37.56 +78.0556,66.35,37.73 +79.6228,65.62,38.095 +79.1785,65.81,37.645 +77.2385,66.1,37.45 +78.4385,67.11,36.825 +78.7871,64.51,36.8 +79.4542,65.34,36.695 +78.0099,64.42,37.49 +78.6428,64.43,37.105 +72.3571,64.34,36.945 +71.5356,63.98,35.78 +71.3974,64.91,35.955 +71.5142,63.8,35.56 +71.6471,62.72,34.485 +72.6842,62.99,35.325 +73.2271,62.89,35.245 +73.2156,63.4,36.18 +74.2399,64.6,37.0175 +75.5699,65.08,37.4 +76.5656,65.03,37.25 +76.5599,65.78,36.955 +77.7756,65.67,37.345 +77.7128,65.61,37.515 +77.9985,65.78,36.985 +76.7671,64.93,36.66 +75.8785,65.22,36.775 +75.0356,65.02,36.28 +75.3642,64.96,36.28 +74.5799,65.1,35.275 +73.9071,65.45,35.89 +75.3814,65.9,36.095 +75.1771,66.2,35.48 +75.3942,65.98,35.235 +75.8914,66.76,35.83 +76.0514,66.33,35.65 +75.8214,66.57,36.345 +75.7771,66.64,36.535 +75.8456,66.43,36.78 +76.5842,66.08,37.515 +76.6585,65.02,37.815 +75.8071,64.21,37.215 +74.9556,63.67,37.135 +75.2485,65.08,37.09 +75.9142,65.72,37.3 +75.8942,65.7,37.955 +75.5285,66.66,38.4775 +76.1242,66.81,38.355 +77.0271,66.05,37.885 +77.8556,66.18,37.305 +77.1114,65.16,36.77 +76.7799,64.36,36.7 +76.6942,64.3,36.85 +76.6771,64.29,36.69 +77.3785,64.91,37.005 +77.5071,65.1,36.835 +76.9699,65.09,36.545 +75.9742,64.26,35.775 +74.7814,64.43,35.215 +74.7771,64.95,35.74 +75.7599,65.26,36.24 +74.7828,63.99,35.11 +74.2299,63.39,34.365 +74.5256,63.78,34.655 +73.9942,63.37,34.445 +74.1442,63.23,35.395 +74.9914,63.15,35.075 +75.8814,62.51,35.24 +75.9569,63.27,35.5745 +74.9642,63.29,35.195 +81.1099,63.0,35.545 +81.7056,62.5,35.725 +84.8699,62.64,35.465 +84.6185,63.43,35.32 +84.2985,63.58,35.31 +84.4971,62.65,35.56 +84.6542,65.51,35.3 +85.8513,66.15,35.46 +84.9156,66.4,34.79 +84.6185,67.14,34.87 +83.9985,67.38,34.79 +83.6488,67.26,35.145 +84.6899,67.8,35.575 +84.8228,67.75,35.58 +84.8385,67.2,35.085 +84.1171,66.34,34.925 +85.3585,66.3,35.47 +86.3699,66.88,35.51 
+86.3871,66.52,35.115 +86.6156,66.89,35.2 +86.7528,66.63,35.7 +87.7328,67.0,35.99 +89.3756,67.02,36.83 +89.1442,66.93,36.635 +90.7685,66.91,36.555 +90.4285,67.42,36.62 +89.8071,67.4,36.925 +91.0771,66.86,37.09 +92.1171,67.23,37.335 +92.4785,67.17,37.36 +92.2242,67.66,37.665 +93.7,67.67,37.59 +94.25,67.7,37.3 +93.86,66.93,37.4 +92.29,66.46,36.98 +91.28,66.78,37.345 +92.2,66.72,37.545 +92.08,66.64,37.655 +92.18,66.62,37.78 +91.86,67.06,38.615 +90.91,67.07,38.3 +90.83,67.1,38.365 +90.28,66.73,38.715 +90.36,66.55,39.06 +90.9,66.56,39.03 +91.98,66.78,38.97 +92.93,66.57,38.69 +93.52,66.96,39.04 +93.48,67.02,39.095 +94.03,67.41,39.53 +95.96799999999999,67.24,39.345 +95.35,66.27,39.28 +95.39,66.58,39.725 +95.035,66.45,39.425 +95.22,66.0,39.3 +96.45,66.08,39.28 +95.32,65.49,39.445 +94.78,65.67,39.365 +93.0899,64.94,38.62 +94.43,65.49,38.97 +93.939,65.74,38.805 +94.72,66.05,39.37 +97.19,65.77,39.57 +97.03,65.61,40.225 +97.671,65.0,39.37 +99.02,65.21,39.18 +98.38,64.74,39.325 +98.15,64.83,39.45 +95.6,63.68,38.84 +96.13,64.11,38.49 +95.59,64.11,38.765 +95.12,61.39,38.395 +94.96,61.21,38.565 +94.48,61.25,38.355 +94.74,62.19,38.81 +95.99,61.73,38.935 +95.97,61.64,38.91 +97.24,62.03,38.62 +97.5,61.52,38.31 +97.98,61.0,38.455 +99.16,60.81,38.795 +100.53,61.37,39.06 +100.57,61.64,39.015 +100.58,61.7,38.735 +101.32,61.23,38.64 +101.54,61.02,38.985 +100.889,60.3,38.895 +102.13,59.68,38.96 +102.25,59.37,38.905 +102.5,59.4,38.905 +103.3,59.01,38.74 +98.94,58.94,38.395 +98.12,58.98,38.58 +98.97,58.89,38.975 +98.36,61.02,38.835 +97.99,61.08,38.56 +101.0,61.22,38.605 +101.43,61.22,38.06 +101.66,61.54,37.735 +101.63,61.42,37.46 +100.86,61.69,37.545 +101.58,61.91,37.67 +101.79,62.04,37.865 +100.96,61.88,38.035 +101.06,61.68,37.3 +102.64,61.57,36.9775 +101.75,61.8,37.66 +97.87,62.24,37.06 +100.75,63.42,37.585 +100.11,63.18,37.635 +100.75,63.28,37.73 +99.18,62.34,37.305 +99.9,61.03,37.225 +99.62,61.3,37.945 +99.62,61.5,37.5725 +98.75,60.47,37.025 +100.8,61.58,37.63 +101.02,60.46,37.24 +100.73,59.05,37.23 +99.81,58.5,36.095 +98.75,58.73,36.37 +97.54,59.32,36.19 +96.26,59.18,36.32 +97.67,60.79,36.77 +99.76,61.25,37.35 +102.47,62.39,37.18 +102.99,61.63,37.3 +104.83,62.25,37.42 +105.22,62.57,37.905 +105.11,62.8,37.985 +106.74,64.06,38.525 +107.34,63.94,38.27 +106.98,63.7,38.66 +108.0,64.5,37.78 +109.4,64.68,38.05 +108.6,66.76,38.355 +108.86,64.46,38.33 +108.7,63.42,38.725 +109.01,64.14,38.895 +108.83,63.94,38.825 +109.7,63.55,38.865 +111.25,63.7,38.925 +112.82,64.43,38.945 +114.18,65.25,39.06 +113.99,65.4,38.915 +115.47,66.0,38.785 +114.67,65.94,38.91 +116.31,65.66,39.1 +116.47,65.27,39.88 +118.625,65.81,40.26 +117.6,65.6,40.105 +119.0,65.56,39.85 +118.93,65.72,40.605 +115.07,65.44,40.425 +114.63,65.51,40.185 +115.93,65.32,40.235 +115.49,65.2,40.655 +115.0,65.0,41.785 +112.4,65.27,41.9 +114.12,65.29,41.515 +111.95,63.52,41.33 +111.62,63.29,41.56 +109.73,62.31,41.625 +108.225,61.91,40.445 +106.745,61.73,39.565 +109.41,63.99,40.2175 +112.65,65.11,40.015 +111.78,65.5,39.72 +112.94,66.53,40.27 +112.54,66.93,40.715 +112.01,67.34,40.635 +113.99,67.49,40.915 +113.91,67.87,41.19 +112.52,67.53,40.895 +110.38,67.08,41.025 +109.33,66.51,40.72 +106.25,65.06,39.94 +106.26,64.51,39.615 +107.75,64.43,40.59 +111.89,65.43,41.245 +112.01,65.11,39.895 +109.25,64.35,40.115 +110.22,64.11,40.435 +109.8,63.76,40.21 +106.82,63.41,39.79 +105.99,64.05,40.305 +108.72,64.02,40.6125 +109.55,64.31,40.645 +112.4,65.36,41.37 +112.98,65.48,44.11 +113.1,65.71,44.06 +109.14,64.94,44.17 +115.31,63.84,43.7825 +118.9,63.83,44.525 +117.16,62.41,43.765 
+118.63,62.81,43.995 +118.65,64.01,44.245 +119.56,63.94,44.35 +119.94,64.0,44.82 +118.93,64.66,44.5 +119.72,67.78,44.41 +122.02,68.22,45.59 +124.88,68.57,45.395 +126.46,70.0,45.9125 +127.08,69.91,45.79 +127.83,69.79,46.015 +128.715,69.12,46.5 +128.45,69.03,46.585 +129.495,69.83,46.755 +133.0,68.63,46.79 +132.17,68.53,46.725 +128.79,68.02,47.13 +130.415,68.47,47.275 +128.46,67.94,46.7425 +129.09,68.89,47.1125 +129.36,68.14,47.0 +128.54,67.64,46.53 +126.41,67.93,46.815 +126.6,66.82,46.1075 +127.14,66.57,46.52 +124.51,65.33,46.09 +122.24,65.31,45.71 +124.45,64.96,46.69 +123.59,64.8,46.645 +124.95,65.86,47.0225 +127.04,65.32,47.1925 +128.47,66.65,47.92 +127.495,66.34,48.88 +125.9,66.83,48.73 +127.21,66.52,48.685 +126.69,66.23,48.9575 +123.38,65.35,47.885 +124.24,65.42,47.54 +123.25,65.38,47.535 +126.37,66.39,47.99 +124.43,66.67,47.35 +124.25,66.67,46.51 +125.32,62.51,47.195 +127.35,61.48,47.26 +126.01,61.99,47.035 +125.6,62.42,47.615 +126.56,62.32,47.96 +127.1,62.53,48.17 +126.85,61.97,48.5 +126.3,61.91,48.3 +126.78,61.82,48.14 +126.17,61.86,48.245 +124.75,60.68,47.62 +127.6,61.16,47.97 +126.91,61.43,48.37 +128.62,61.59,48.335 +129.67,60.84,49.43 +130.28,60.57,51.84 +132.65,60.98,50.87 +130.56,60.69,50.61 +128.64,59.74,50.65 +125.15,59.75,49.58 +128.95,60.28,50.29 +128.7,60.68,50.445 +125.8,58.59,49.405 +125.01,58.75,48.93 +125.26,60.01,49.35 +127.62,60.59,49.78 +126.32,59.8,49.5 +125.865,59.42,49.71 +126.01,59.25,49.59 +128.95,59.79,50.555 +128.77,59.3,50.8 +130.19,60.12,51.18 +130.07,59.8,51.42 +130.06,59.8,51.03 +131.39,59.79,51.33 +132.54,59.66,51.48 +129.62,59.11,50.84 +132.045,59.06,51.59 +131.78,59.63,51.81 +130.28,59.0,51.96 +130.535,59.65,52.22 +129.96,59.19,51.73 +130.12,59.48,52.12 +129.36,58.8,51.72 +128.65,58.61,52.19 +127.8,58.08,51.53 +127.42,57.9,51.54 +128.88,58.49,52.69 +128.59,58.55,52.49 +127.17,57.65,52.63 +126.92,57.95,52.27 +127.6,58.18,52.965 +127.3,57.97,53.24 +127.88,58.39,54.11 +126.6,58.05,53.93 +127.61,59.22,53.9 +127.03,59.12,54.115 +128.11,58.29,53.71 +127.5,58.35,54.07 +126.75,58.38,54.62 +124.53,57.14,53.55 +125.425,57.34,53.615 +126.6,57.6,53.89 +126.44,57.51,54.24 +126.0,57.22,54.305 +125.69,57.49,54.375 +122.57,56.79,53.39 +120.07,56.94,54.05 +123.28,57.48,54.57 +125.66,58.43,55.7 +125.61,58.6,55.75 +126.82,58.89,55.34 +128.51,59.29,55.74 +129.62,58.85,55.69 +132.07,59.4,56.21 +130.75,59.57,56.2 +125.22,59.35,56.69 +125.16,58.85,56.56 +124.5,59.5,57.29 +122.77,58.71,56.98 +123.38,59.11,57.14 +122.99,59.58,57.51 +122.37,59.86,58.06 +121.3,60.16,57.93 +118.44,59.76,58.19 +114.64,60.22,58.7 +115.4,64.04,59.01 +115.13,63.8,57.23 +115.52,64.19,57.2 +119.72,63.99,56.27 +113.49,63.35,56.35 +115.24,64.6,56.38 +115.15,64.34,56.85 +115.96,64.98,57.1 +117.16,65.27,57.74 +116.5,65.77,57.83 +115.01,65.35,57.59 +112.65,63.89,55.81 +105.76,62.45,52.84 +103.12,60.79,50.34 +103.74,60.44,51.09 +109.69,63.14,53.96 +112.92,64.29,55.95 +113.29,64.55,55.63 +112.76,64.82,54.71 +107.72,63.85,53.5 +112.34,64.72,55.26 +110.37,65.11,54.69 +109.27,66.31,54.28 +112.31,69.61,55.21 +110.15,68.3,54.69 +112.57,69.09,55.37 +114.21,67.08,56.53 +115.31,66.84,56.29 +116.28,67.15,56.91 +116.41,67.47,57.26 +113.92,67.03,57.28 +113.45,67.09,56.84 +115.21,67.05,57.54 +113.4,66.58,57.12 +114.32,67.8,57.79 +115.0,67.91,58.37 +114.71,69.2,57.99 +112.44,67.93,55.77 +109.06,67.45,55.72 +110.3,68.38,56.84 +109.58,67.76,57.48 +110.38,68.4,58.08 +110.78,69.75,59.04 +111.31,69.19,58.69 +110.78,69.79,58.78 +109.5,69.5,59.46 +112.12,68.78,60.07 +111.6,69.43,60.54 +111.79,69.04,60.16 +110.21,68.7,58.82 
+111.86,69.27,59.69 +111.04,69.26,59.93 +111.73,69.03,60.97 +113.77,69.48,60.88 +113.76,69.47,60.53 +115.5,70.48,61.49 +119.08,70.48,62.61 +115.28,70.05,63.43 +114.55,69.96,62.71 +119.27,70.37,63.51 +120.53,70.13,62.5 +119.5,69.97,62.57 +121.18,70.73,62.24 +122.57,71.36,62.8 +122.0,65.24,61.96 +120.92,67.4,62.28 +121.06,68.01,61.97 +120.57,68.2,61.34 +116.77,68.34,62.18 +116.11,70.02,61.87 +115.72,69.44,61.07 +112.34,69.03,59.74 +114.175,70.02,60.68 +113.69,71.05,60.55 +117.29,71.98,61.8 +118.78,72.45,61.46 +119.3,72.19,61.99 +117.75,72.24,62.64 +118.88,71.96,61.96 +118.03,71.83,62.19 +117.81,72.02,62.18 +118.3,71.78,61.39 +117.34,72.05,61.37 +116.28,71.89,61.22 +115.2,71.08,59.55 +119.03,72.11,61.75 +118.28,70.38,61.89 +118.23,69.75,62.16 +115.62,69.31,61.18 +116.17,69.37,61.87 +113.18,68.61,59.82 +112.48,68.14,59.92 +110.49,69.13,59.98 +111.34,69.52,60.35 +108.98,68.56,59.515 +106.03,67.58,58.62 +107.33,68.03,59.54 +107.23,68.87,59.99 +108.61,69.21,60.34 +108.03,69.06,60.32 +106.82,69.18,60.19 +108.74,69.64,61.13 +107.32,69.3,60.82 +105.26,68.45,60.03 +105.35,67.13,58.26 +102.71,66.39,58.65 +100.7,65.43,58.13 +96.45,64.11,56.69 +96.96,64.25,56.63 +98.53,64.37,57.82 +99.96,64.91,59.46 +97.39,63.37,57.87 +99.52,63.11,58.98 +97.13,61.59,58.0 +96.66,61.13,58.55 +96.79,60.36,56.92 +96.3,60.82,59.03 +101.42,62.04,59.17 +99.44,62.42,57.71 +99.99,63.16,58.61 +93.42,64.8,57.63 +94.09,64.74,59.285 +97.34,66.77,60.77 +96.43,66.85,61.4 +94.48,64.32,60.695 +96.35,64.88,59.53 +96.6,64.25,58.29 +94.02,62.82,54.49 +95.01,62.09,54.14 +94.99,62.24,54.42 +94.27,60.97,55.14 +93.7,60.52,54.92 +93.99,61.78,55.86 +96.64,63.42,56.41 +98.12,65.05,57.63 +96.26,64.78,56.96 +96.04,66.0,57.67 +96.88,66.75,58.87 +94.69,70.78,58.46 +96.1,72.84,58.11 +96.76,74.06,58.75 +96.91,74.86,58.34 +96.69,73.49,58.21 +100.53,71.19,60.04 +100.75,71.28,59.56 +101.5,71.25,59.04 +103.01,70.95,58.7 +101.87,71.01,58.0 +101.03,71.1,57.6 +101.12,71.48,57.07 +101.17,71.22,57.52 +102.26,71.2,57.59 +102.52,71.83,58.65 +104.58,71.97,59.08 +105.97,72.24,59.67 +105.8,72.83,59.55 +105.92,72.59,59.7 +105.91,73.12,59.1 +106.72,73.71,59.38 +106.13,73.15,58.83 +105.67,72.59,58.36 +105.19,73.37,58.96 +107.68,74.09,59.55 +109.56,74.89,60.01 +108.99,75.7,59.7 +109.99,76.11,61.02 +111.12,76.32,60.25 +109.81,75.71,60.04 +110.96,76.09,60.83 +108.54,74.99,61.17 +108.66,75.24,61.04 +109.02,74.88,60.9 +110.44,75.04,59.5 +112.04,75.37,60.21 +112.1,75.31,60.13 +109.85,75.64,60.51 +107.48,75.69,60.89 +106.91,75.97,60.9 +107.13,75.55,60.9 +105.97,74.99,60.64 +105.68,75.56,57.68 +105.08,75.51,57.77 +104.35,75.9,57.72 +97.82,76.04,56.9 +94.83,75.34,56.42 +93.74,75.19,56.23 +93.64,76.0,57.36 +95.18,74.96,56.25 +94.19,74.22,56.39 +93.24,74.25,56.25 +92.72,70.54,56.31 +92.79,70.82,56.64 +93.42,71.05,57.49 +92.51,70.07,56.23 +90.34,71.11,56.3 +90.52,70.62,55.82 +93.88,70.83,55.53 +93.49,69.89,54.88 +94.56,69.46,54.8 +94.2,68.72,54.55 +95.22,68.75,54.62 +96.43,68.78,54.6 +97.9,69.68,55.44 +99.62,69.35,55.15 +100.41,69.4,55.29 +100.35,69.5,55.15 +99.86,69.27,54.89 +98.46,69.06,54.82 +97.72,68.8,54.62 +97.92,68.47,54.61 +98.63,68.77,55.59 +99.03,68.16,55.3 +98.94,69.05,55.22 +99.65,68.56,55.58 +98.83,67.45,54.865 +97.34,66.82,55.04 +97.46,67.24,55.57 +97.14,67.54,55.35 +97.55,67.8,55.53 +95.33,67.33,55.31 +95.1,68.35,55.38 +95.91,67.81,55.81 +95.55,67.43,55.61 +96.1,68.01,56.13 +93.4,64.73,54.68 +92.04,63.08,53.69 +93.59,63.69,54.85 +94.4,64.55,56.74 +95.6,65.97,57.12 +95.89,66.01,56.99 +94.99,64.77,56.77 +95.53,65.3,56.75 +95.94,65.05,56.91 +96.68,66.38,56.51 
+96.98,66.62,56.32 +97.42,67.4,57.48 +96.87,67.46,56.48 +98.79,67.58,57.59 +98.78,67.4,57.41 +99.83,67.55,56.92 +99.87,67.5,56.76 +99.96,67.93,57.54 +99.43,67.55,57.6 +98.66,68.25,57.9 +97.34,68.09,57.95 +96.67,68.42,58.31 +102.95,69.26,57.85 +104.34,69.58,58.21 +104.21,69.38,58.05 +106.05,69.63,57.63 +104.48,68.84,56.73 +105.79,69.29,55.94 +105.87,70.24,55.42 +107.48,73.5,55.9 +108.37,73.93,55.36 +108.81,74.28,55.2 +108.0,74.28,55.62 +107.93,75.52,55.47 +108.18,74.54,55.47 +109.48,75.44,55.25 +109.38,75.58,55.37 +109.22,75.68,55.8 +109.08,75.99,55.53 +109.36,76.34,54.94 +108.51,76.49,55.85 +108.85,76.99,56.4 +108.03,77.12,57.09 +107.57,77.18,57.29 +106.94,77.2,57.29 +106.82,77.29,56.8 +106.0,77.51,56.4 +106.1,76.99,56.23 +106.73,76.8,56.31 +107.73,77.95,56.18 +107.7,78.32,56.02 +108.36,78.08,56.32 +105.52,77.37,55.3 +103.13,76.65,54.35 +105.44,77.23,54.71 +107.95,76.09,53.98 +111.77,75.47,53.9 +115.57,76.04,54.11 +114.92,75.63,53.74 +113.58,75.76,53.01 +113.57,75.21,53.3 +113.55,75.73,53.98 +114.62,76.19,54.39 +112.71,76.11,54.43 +112.88,75.95,54.04 +113.09,76.32,54.19 +113.95,76.79,53.98 +112.18,77.21,53.45 +113.05,76.28,54.14 +112.52,75.25,53.84 +113.0,74.42,53.53 +113.05,74.35,53.35 +113.89,74.64,53.14 +114.06,74.48,53.46 +116.05,74.67,53.3 +116.3,73.5,52.92 +117.34,73.76,53.16 +116.98,73.06,52.95 +117.63,73.58,53.08 +117.55,73.13,52.76 +117.47,73.8,52.61 +117.12,73.8,53.15 +117.06,73.57,53.59 +116.6,73.62,53.63 +117.65,74.49,54.18 +118.25,74.16,53.67 +115.59,73.58,53.63 +114.48,73.48,53.59 +113.72,72.83,53.53 +113.54,72.58,53.07 +111.49,72.32,52.5 +111.59,71.57,52.98 +109.83,71.29,51.77 +108.84,75.9,52.75 +110.41,77.71,54.49 +111.06,78.56,54.62 +110.88,78.96,54.58 +107.79,79.19,53.57 +108.43,80.38,53.93 +105.71,80.6,54.22 +107.11,81.8,54.59 +109.99,80.51,55.44 +109.95,80.35,55.85 +110.06,79.98,55.77 +111.73,79.83,56.1 +111.8,80.31,57.12 +111.23,80.26,57.59 +111.79,80.98,57.43 +111.57,80.86,57.59 +111.46,81.11,58.17 +110.52,80.25,57.97 +109.49,79.19,58.51 +109.9,79.5,57.21 +109.11,80.92,57.5 +109.95,82.22,57.44 +111.03,83.27,58.76 +112.12,83.3,58.65 +113.95,82.79,58.75 +113.3,82.6,58.77 +115.19,83.24,59.31 +115.19,82.9,58.75 +115.82,83.46,57.71 +115.97,83.4,57.66 +116.64,83.93,57.65 +116.95,83.76,57.7 +117.06,84.0,57.44 +116.29,83.72,57.11 +116.52,83.41,57.01 +117.26,83.52,56.86 +116.76,82.86,56.35 +116.73,82.87,56.32 +115.82,82.89,55.52 +116.15,83.6,55.35 +116.02,83.49,55.99 +116.61,82.64,56.46 +117.91,82.89,57.13 +118.99,83.02,58.2 +119.11,82.63,57.88 +119.75,82.88,58.1 +119.25,82.18,58.03 +119.04,82.27,57.85 +120.0,80.73,58.0 +119.99,81.65,58.45 +119.78,81.86,57.89 +120.0,82.36,57.66 +120.08,82.44,57.76 +119.97,84.35,58.44 +121.88,85.29,58.7 +121.94,83.36,58.46 +121.95,82.98,56.12 +121.63,81.7,55.9 +121.35,80.71,55.22 +128.75,80.03,53.9 +128.53,81.0,53.87 +129.08,81.6,55.06 +130.29,81.73,55.73 +131.53,77.34,55.24 +132.04,78.25,55.22 +132.42,77.81,55.81 +132.12,78.37,56.22 +133.29,78.48,56.11 +135.02,78.68,56.58 +135.51,79.4,56.86 +135.345,78.66,56.73 +135.72,79.31,57.35 +136.7,80.15,57.54 +137.11,79.65,57.57 +136.53,79.36,57.64 +136.66,80.27,57.48 +136.93,79.28,56.78 +136.99,78.97,56.87 +139.79,79.98,57.14 +138.96,80.02,57.12 +139.78,80.55,57.1 +139.34,79.97,56.68 +139.52,79.66,56.2 +139.0,80.2,55.74 +138.68,81.37,55.19 +139.14,82.1,54.53 +139.2,81.65,54.63 +138.99,83.36,54.27 +140.46,85.24,54.54 +140.69,85.15,54.8 +139.99,84.72,55.78 +141.46,84.3,55.81 +139.84,83.76,55.54 +141.42,83.59,55.89 +140.92,83.74,55.85 +140.64,83.67,56.81 +140.88,84.0,57.23 +143.8,84.0,57.35 
+144.12,84.13,57.54 +143.93,84.87,58.16 +143.66,86.22,58.39 +143.7,84.83,58.44 +144.77,84.52,58.32 +144.02,83.83,58.22 +143.66,84.2,57.92 +143.34,84.25,58.02 +143.17,83.71,57.95 +141.63,83.45,57.88 +141.8,82.84,57.58 +141.05,82.34,57.51 +141.83,83.08,58.08 +141.2,82.64,58.35 +140.68,83.37,59.04 +142.44,84.1,60.08 +142.27,83.72,60.61 +143.64,84.72,61.11 +144.53,85.39,60.96 +143.68,85.38,61.56 +143.79,86.07,61.3 +143.65,85.97,60.06 +146.58,86.16,60.18 +147.51,85.92,60.5 +147.06,86.37,60.59 +146.53,86.1,60.83 +148.96,84.44,60.95 +153.01,83.59,60.94 +153.99,84.77,60.98 +153.26,85.77,60.66 +153.95,85.36,60.27 +156.1,84.21,59.93 +155.7,84.48,60.45 +155.47,83.7,59.98 +150.25,81.85,59.73 +152.54,80.83,59.82 +153.06,80.83,61.36 +153.99,82.93,61.23 +153.8,82.11,61.15 +153.34,82.22,61.89 +153.87,82.27,62.9 +153.61,81.86,63.3 +153.67,82.83,63.26 +152.76,83.57,63.61 +153.18,85.64,63.75 +155.45,86.62,64.57 +153.93,87.31,64.27 +154.45,87.46,64.16 +155.37,86.18,63.5 +154.99,86.65,62.24 +148.98,86.17,62.19 +145.42,86.11,61.29 +146.59,86.04,60.92 +145.16,84.87,60.27 +144.29,84.45,60.09 +142.27,84.72,60.14 +146.34,86.2,60.9 +145.01,85.74,59.86 +145.87,86.24,59.96 +145.63,87.36,59.51 +146.28,88.64,59.81 +145.82,88.42,59.64 +143.73,87.72,58.96 +145.83,88.13,59.18 +143.68,86.8,58.36 +144.02,86.74,58.31 +143.5,86.68,58.25 +144.09,86.83,57.94 +142.73,85.96,57.6 +144.18,87.31,58.04 +145.06,87.23,57.81 +145.53,87.65,57.9 +145.74,88.65,58.54 +147.77,88.33,58.38 +149.04,88.61,58.76 +149.56,88.28,58.33 +150.08,88.45,58.21 +151.02,89.78,58.11 +150.34,89.96,58.03 +150.27,90.52,57.98 +152.09,90.67,58.02 +152.74,91.39,58.55 +153.46,91.84,57.94 +150.56,92.21,59.5 +149.5,91.01,54.0 +148.73,90.68,53.98 +158.59,90.43,54.73 +157.14,90.43,55.43 +155.57,90.4,55.68 +156.39,90.37,55.44 +158.81,89.2,55.63 +160.08,88.58,54.52 +161.06,88.51,53.74 +155.32,86.99,53.07 +157.48,87.48,53.18 +159.85,88.6,53.22 +161.6,87.92,53.15 +160.95,88.19,53.5 +157.86,87.13,53.04 +157.5,87.37,52.7 +157.21,87.2,53.15 +159.78,86.41,54.45 +159.98,86.21,54.08 +159.27,86.51,53.94 +159.86,86.88,54.36 +161.47,87.3,54.4 +162.91,86.94,54.1 +163.35,87.66,54.52 +164.0,88.12,54.86 +164.05,87.87,54.93 +162.08,86.66,55.13 +161.91,85.72,54.31 +161.26,86.35,53.47 +158.63,85.12,53.49 +161.5,87.01,54.02 +160.86,87.74,53.54 +159.65,85.97,54.29 +158.28,84.58,54.53 +159.88,85.48,54.67 +158.67,85.48,54.69 +158.73,85.84,54.62 +156.07,85.65,55.15 +153.39,84.99,55.01 +151.89,84.29,55.09 +150.55,83.5,54.95 +153.14,83.02,55.13 +154.23,84.1,54.99 +153.28,83.69,54.5 +154.12,84.87,53.71 +153.81,85.84,53.81 +154.48,85.69,53.99 +153.48,85.64,53.93 +155.39,86.0,54.6 +155.3,89.44,55.17 +155.84,89.1,55.02 +155.9,89.08,55.42 +156.55,89.26,55.64 +156.0,89.8,55.97 +156.99,89.93,55.72 +159.88,89.36,54.91 +160.47,88.88,54.51 +159.76,89.09,55.21 +155.98,89.65,55.4 +156.25,90.0,54.57 +156.17,89.94,54.27 +157.1,90.23,54.28 +156.41,90.04,54.16 +157.41,90.28,54.91 +163.05,91.19,54.88 +166.72,90.37,55.17 +169.04,90.54,54.84 +166.89,90.56,55.13 +168.11,90.02,54.87 +172.5,94.25,56.03 +174.25,92.43,56.57 +174.81,92.11,57.22 +176.24,92.66,57.91 +175.88,91.61,57.36 +174.67,91.07,57.04 +173.97,91.37,56.64 +171.34,91.02,56.93 +169.08,90.39,56.7 +171.1,90.97,57.24 +170.15,90.95,56.93 +169.98,92.33,56.81 +173.14,92.45,57.26 +174.96,91.83,57.14 +174.97,92.36,56.8 +174.09,92.88,55.91 +173.07,94.53,56.66 +169.48,94.17,57.51 +171.85,94.11,57.82 +171.05,93.03,57.32 +169.8,93.63,58.76 +169.64,90.66,59.34 +169.01,91.29,59.28 +169.32,92.8,59.14 +169.37,92.52,58.61 +172.67,92.33,59.07 +171.7,93.37,59.27 
+172.27,93.94,59.49 +172.22,92.2,59.7 +173.97,93.15,58.29 +176.42,94.49,58.03 +174.54,93.28,58.01 +174.35,92.1,57.73 +175.01,91.62,57.58 +175.01,90.76,57.3 +170.57,90.67,57.14 +170.6,90.8,57.27 +171.08,90.57,57.81 +169.23,90.34,57.43 +172.26,90.55,57.63 +172.23,89.91,58.71 +173.03,90.66,58.93 +175.0,91.88,59.61 +174.35,92.82,59.31 +174.33,92.12,59.18 +174.29,92.38,59.82 +175.28,93.55,60.0 +177.09,96.57,60.4 +176.19,95.86,60.56 +179.1,97.28,60.66 +179.26,97.5,61.09 +178.46,97.8,61.26 +177.0,97.33,61.41 +177.04,96.76,61.69 +174.22,95.84,60.83 +171.11,97.68,60.55 +171.51,99.0,57.99 +167.96,99.18,57.02 +166.97,99.8,57.19 +167.43,99.46,56.81 +167.78,99.12,56.0 +160.5,103.87,55.77 +156.49,101.06,54.69 +163.03,102.76,55.61 +159.54,102.63,54.46 diff --git a/tf2.0/auto-mpg.data b/tf2.0/auto-mpg.data new file mode 100644 index 00000000..33404b06 --- /dev/null +++ b/tf2.0/auto-mpg.data @@ -0,0 +1,398 @@ +18.0 8 307.0 130.0 3504. 12.0 70 1 "chevrolet chevelle malibu" +15.0 8 350.0 165.0 3693. 11.5 70 1 "buick skylark 320" +18.0 8 318.0 150.0 3436. 11.0 70 1 "plymouth satellite" +16.0 8 304.0 150.0 3433. 12.0 70 1 "amc rebel sst" +17.0 8 302.0 140.0 3449. 10.5 70 1 "ford torino" +15.0 8 429.0 198.0 4341. 10.0 70 1 "ford galaxie 500" +14.0 8 454.0 220.0 4354. 9.0 70 1 "chevrolet impala" +14.0 8 440.0 215.0 4312. 8.5 70 1 "plymouth fury iii" +14.0 8 455.0 225.0 4425. 10.0 70 1 "pontiac catalina" +15.0 8 390.0 190.0 3850. 8.5 70 1 "amc ambassador dpl" +15.0 8 383.0 170.0 3563. 10.0 70 1 "dodge challenger se" +14.0 8 340.0 160.0 3609. 8.0 70 1 "plymouth 'cuda 340" +15.0 8 400.0 150.0 3761. 9.5 70 1 "chevrolet monte carlo" +14.0 8 455.0 225.0 3086. 10.0 70 1 "buick estate wagon (sw)" +24.0 4 113.0 95.00 2372. 15.0 70 3 "toyota corona mark ii" +22.0 6 198.0 95.00 2833. 15.5 70 1 "plymouth duster" +18.0 6 199.0 97.00 2774. 15.5 70 1 "amc hornet" +21.0 6 200.0 85.00 2587. 16.0 70 1 "ford maverick" +27.0 4 97.00 88.00 2130. 14.5 70 3 "datsun pl510" +26.0 4 97.00 46.00 1835. 20.5 70 2 "volkswagen 1131 deluxe sedan" +25.0 4 110.0 87.00 2672. 17.5 70 2 "peugeot 504" +24.0 4 107.0 90.00 2430. 14.5 70 2 "audi 100 ls" +25.0 4 104.0 95.00 2375. 17.5 70 2 "saab 99e" +26.0 4 121.0 113.0 2234. 12.5 70 2 "bmw 2002" +21.0 6 199.0 90.00 2648. 15.0 70 1 "amc gremlin" +10.0 8 360.0 215.0 4615. 14.0 70 1 "ford f250" +10.0 8 307.0 200.0 4376. 15.0 70 1 "chevy c20" +11.0 8 318.0 210.0 4382. 13.5 70 1 "dodge d200" +9.0 8 304.0 193.0 4732. 18.5 70 1 "hi 1200d" +27.0 4 97.00 88.00 2130. 14.5 71 3 "datsun pl510" +28.0 4 140.0 90.00 2264. 15.5 71 1 "chevrolet vega 2300" +25.0 4 113.0 95.00 2228. 14.0 71 3 "toyota corona" +25.0 4 98.00 ? 2046. 19.0 71 1 "ford pinto" +19.0 6 232.0 100.0 2634. 13.0 71 1 "amc gremlin" +16.0 6 225.0 105.0 3439. 15.5 71 1 "plymouth satellite custom" +17.0 6 250.0 100.0 3329. 15.5 71 1 "chevrolet chevelle malibu" +19.0 6 250.0 88.00 3302. 15.5 71 1 "ford torino 500" +18.0 6 232.0 100.0 3288. 15.5 71 1 "amc matador" +14.0 8 350.0 165.0 4209. 12.0 71 1 "chevrolet impala" +14.0 8 400.0 175.0 4464. 11.5 71 1 "pontiac catalina brougham" +14.0 8 351.0 153.0 4154. 13.5 71 1 "ford galaxie 500" +14.0 8 318.0 150.0 4096. 13.0 71 1 "plymouth fury iii" +12.0 8 383.0 180.0 4955. 11.5 71 1 "dodge monaco (sw)" +13.0 8 400.0 170.0 4746. 12.0 71 1 "ford country squire (sw)" +13.0 8 400.0 175.0 5140. 12.0 71 1 "pontiac safari (sw)" +18.0 6 258.0 110.0 2962. 13.5 71 1 "amc hornet sportabout (sw)" +22.0 4 140.0 72.00 2408. 19.0 71 1 "chevrolet vega (sw)" +19.0 6 250.0 100.0 3282. 
15.0 71 1 "pontiac firebird" +18.0 6 250.0 88.00 3139. 14.5 71 1 "ford mustang" +23.0 4 122.0 86.00 2220. 14.0 71 1 "mercury capri 2000" +28.0 4 116.0 90.00 2123. 14.0 71 2 "opel 1900" +30.0 4 79.00 70.00 2074. 19.5 71 2 "peugeot 304" +30.0 4 88.00 76.00 2065. 14.5 71 2 "fiat 124b" +31.0 4 71.00 65.00 1773. 19.0 71 3 "toyota corolla 1200" +35.0 4 72.00 69.00 1613. 18.0 71 3 "datsun 1200" +27.0 4 97.00 60.00 1834. 19.0 71 2 "volkswagen model 111" +26.0 4 91.00 70.00 1955. 20.5 71 1 "plymouth cricket" +24.0 4 113.0 95.00 2278. 15.5 72 3 "toyota corona hardtop" +25.0 4 97.50 80.00 2126. 17.0 72 1 "dodge colt hardtop" +23.0 4 97.00 54.00 2254. 23.5 72 2 "volkswagen type 3" +20.0 4 140.0 90.00 2408. 19.5 72 1 "chevrolet vega" +21.0 4 122.0 86.00 2226. 16.5 72 1 "ford pinto runabout" +13.0 8 350.0 165.0 4274. 12.0 72 1 "chevrolet impala" +14.0 8 400.0 175.0 4385. 12.0 72 1 "pontiac catalina" +15.0 8 318.0 150.0 4135. 13.5 72 1 "plymouth fury iii" +14.0 8 351.0 153.0 4129. 13.0 72 1 "ford galaxie 500" +17.0 8 304.0 150.0 3672. 11.5 72 1 "amc ambassador sst" +11.0 8 429.0 208.0 4633. 11.0 72 1 "mercury marquis" +13.0 8 350.0 155.0 4502. 13.5 72 1 "buick lesabre custom" +12.0 8 350.0 160.0 4456. 13.5 72 1 "oldsmobile delta 88 royale" +13.0 8 400.0 190.0 4422. 12.5 72 1 "chrysler newport royal" +19.0 3 70.00 97.00 2330. 13.5 72 3 "mazda rx2 coupe" +15.0 8 304.0 150.0 3892. 12.5 72 1 "amc matador (sw)" +13.0 8 307.0 130.0 4098. 14.0 72 1 "chevrolet chevelle concours (sw)" +13.0 8 302.0 140.0 4294. 16.0 72 1 "ford gran torino (sw)" +14.0 8 318.0 150.0 4077. 14.0 72 1 "plymouth satellite custom (sw)" +18.0 4 121.0 112.0 2933. 14.5 72 2 "volvo 145e (sw)" +22.0 4 121.0 76.00 2511. 18.0 72 2 "volkswagen 411 (sw)" +21.0 4 120.0 87.00 2979. 19.5 72 2 "peugeot 504 (sw)" +26.0 4 96.00 69.00 2189. 18.0 72 2 "renault 12 (sw)" +22.0 4 122.0 86.00 2395. 16.0 72 1 "ford pinto (sw)" +28.0 4 97.00 92.00 2288. 17.0 72 3 "datsun 510 (sw)" +23.0 4 120.0 97.00 2506. 14.5 72 3 "toyouta corona mark ii (sw)" +28.0 4 98.00 80.00 2164. 15.0 72 1 "dodge colt (sw)" +27.0 4 97.00 88.00 2100. 16.5 72 3 "toyota corolla 1600 (sw)" +13.0 8 350.0 175.0 4100. 13.0 73 1 "buick century 350" +14.0 8 304.0 150.0 3672. 11.5 73 1 "amc matador" +13.0 8 350.0 145.0 3988. 13.0 73 1 "chevrolet malibu" +14.0 8 302.0 137.0 4042. 14.5 73 1 "ford gran torino" +15.0 8 318.0 150.0 3777. 12.5 73 1 "dodge coronet custom" +12.0 8 429.0 198.0 4952. 11.5 73 1 "mercury marquis brougham" +13.0 8 400.0 150.0 4464. 12.0 73 1 "chevrolet caprice classic" +13.0 8 351.0 158.0 4363. 13.0 73 1 "ford ltd" +14.0 8 318.0 150.0 4237. 14.5 73 1 "plymouth fury gran sedan" +13.0 8 440.0 215.0 4735. 11.0 73 1 "chrysler new yorker brougham" +12.0 8 455.0 225.0 4951. 11.0 73 1 "buick electra 225 custom" +13.0 8 360.0 175.0 3821. 11.0 73 1 "amc ambassador brougham" +18.0 6 225.0 105.0 3121. 16.5 73 1 "plymouth valiant" +16.0 6 250.0 100.0 3278. 18.0 73 1 "chevrolet nova custom" +18.0 6 232.0 100.0 2945. 16.0 73 1 "amc hornet" +18.0 6 250.0 88.00 3021. 16.5 73 1 "ford maverick" +23.0 6 198.0 95.00 2904. 16.0 73 1 "plymouth duster" +26.0 4 97.00 46.00 1950. 21.0 73 2 "volkswagen super beetle" +11.0 8 400.0 150.0 4997. 14.0 73 1 "chevrolet impala" +12.0 8 400.0 167.0 4906. 12.5 73 1 "ford country" +13.0 8 360.0 170.0 4654. 13.0 73 1 "plymouth custom suburb" +12.0 8 350.0 180.0 4499. 12.5 73 1 "oldsmobile vista cruiser" +18.0 6 232.0 100.0 2789. 15.0 73 1 "amc gremlin" +20.0 4 97.00 88.00 2279. 19.0 73 3 "toyota carina" +21.0 4 140.0 72.00 2401. 
19.5 73 1 "chevrolet vega" +22.0 4 108.0 94.00 2379. 16.5 73 3 "datsun 610" +18.0 3 70.00 90.00 2124. 13.5 73 3 "maxda rx3" +19.0 4 122.0 85.00 2310. 18.5 73 1 "ford pinto" +21.0 6 155.0 107.0 2472. 14.0 73 1 "mercury capri v6" +26.0 4 98.00 90.00 2265. 15.5 73 2 "fiat 124 sport coupe" +15.0 8 350.0 145.0 4082. 13.0 73 1 "chevrolet monte carlo s" +16.0 8 400.0 230.0 4278. 9.50 73 1 "pontiac grand prix" +29.0 4 68.00 49.00 1867. 19.5 73 2 "fiat 128" +24.0 4 116.0 75.00 2158. 15.5 73 2 "opel manta" +20.0 4 114.0 91.00 2582. 14.0 73 2 "audi 100ls" +19.0 4 121.0 112.0 2868. 15.5 73 2 "volvo 144ea" +15.0 8 318.0 150.0 3399. 11.0 73 1 "dodge dart custom" +24.0 4 121.0 110.0 2660. 14.0 73 2 "saab 99le" +20.0 6 156.0 122.0 2807. 13.5 73 3 "toyota mark ii" +11.0 8 350.0 180.0 3664. 11.0 73 1 "oldsmobile omega" +20.0 6 198.0 95.00 3102. 16.5 74 1 "plymouth duster" +21.0 6 200.0 ? 2875. 17.0 74 1 "ford maverick" +19.0 6 232.0 100.0 2901. 16.0 74 1 "amc hornet" +15.0 6 250.0 100.0 3336. 17.0 74 1 "chevrolet nova" +31.0 4 79.00 67.00 1950. 19.0 74 3 "datsun b210" +26.0 4 122.0 80.00 2451. 16.5 74 1 "ford pinto" +32.0 4 71.00 65.00 1836. 21.0 74 3 "toyota corolla 1200" +25.0 4 140.0 75.00 2542. 17.0 74 1 "chevrolet vega" +16.0 6 250.0 100.0 3781. 17.0 74 1 "chevrolet chevelle malibu classic" +16.0 6 258.0 110.0 3632. 18.0 74 1 "amc matador" +18.0 6 225.0 105.0 3613. 16.5 74 1 "plymouth satellite sebring" +16.0 8 302.0 140.0 4141. 14.0 74 1 "ford gran torino" +13.0 8 350.0 150.0 4699. 14.5 74 1 "buick century luxus (sw)" +14.0 8 318.0 150.0 4457. 13.5 74 1 "dodge coronet custom (sw)" +14.0 8 302.0 140.0 4638. 16.0 74 1 "ford gran torino (sw)" +14.0 8 304.0 150.0 4257. 15.5 74 1 "amc matador (sw)" +29.0 4 98.00 83.00 2219. 16.5 74 2 "audi fox" +26.0 4 79.00 67.00 1963. 15.5 74 2 "volkswagen dasher" +26.0 4 97.00 78.00 2300. 14.5 74 2 "opel manta" +31.0 4 76.00 52.00 1649. 16.5 74 3 "toyota corona" +32.0 4 83.00 61.00 2003. 19.0 74 3 "datsun 710" +28.0 4 90.00 75.00 2125. 14.5 74 1 "dodge colt" +24.0 4 90.00 75.00 2108. 15.5 74 2 "fiat 128" +26.0 4 116.0 75.00 2246. 14.0 74 2 "fiat 124 tc" +24.0 4 120.0 97.00 2489. 15.0 74 3 "honda civic" +26.0 4 108.0 93.00 2391. 15.5 74 3 "subaru" +31.0 4 79.00 67.00 2000. 16.0 74 2 "fiat x1.9" +19.0 6 225.0 95.00 3264. 16.0 75 1 "plymouth valiant custom" +18.0 6 250.0 105.0 3459. 16.0 75 1 "chevrolet nova" +15.0 6 250.0 72.00 3432. 21.0 75 1 "mercury monarch" +15.0 6 250.0 72.00 3158. 19.5 75 1 "ford maverick" +16.0 8 400.0 170.0 4668. 11.5 75 1 "pontiac catalina" +15.0 8 350.0 145.0 4440. 14.0 75 1 "chevrolet bel air" +16.0 8 318.0 150.0 4498. 14.5 75 1 "plymouth grand fury" +14.0 8 351.0 148.0 4657. 13.5 75 1 "ford ltd" +17.0 6 231.0 110.0 3907. 21.0 75 1 "buick century" +16.0 6 250.0 105.0 3897. 18.5 75 1 "chevroelt chevelle malibu" +15.0 6 258.0 110.0 3730. 19.0 75 1 "amc matador" +18.0 6 225.0 95.00 3785. 19.0 75 1 "plymouth fury" +21.0 6 231.0 110.0 3039. 15.0 75 1 "buick skyhawk" +20.0 8 262.0 110.0 3221. 13.5 75 1 "chevrolet monza 2+2" +13.0 8 302.0 129.0 3169. 12.0 75 1 "ford mustang ii" +29.0 4 97.00 75.00 2171. 16.0 75 3 "toyota corolla" +23.0 4 140.0 83.00 2639. 17.0 75 1 "ford pinto" +20.0 6 232.0 100.0 2914. 16.0 75 1 "amc gremlin" +23.0 4 140.0 78.00 2592. 18.5 75 1 "pontiac astro" +24.0 4 134.0 96.00 2702. 13.5 75 3 "toyota corona" +25.0 4 90.00 71.00 2223. 16.5 75 2 "volkswagen dasher" +24.0 4 119.0 97.00 2545. 17.0 75 3 "datsun 710" +18.0 6 171.0 97.00 2984. 14.5 75 1 "ford pinto" +29.0 4 90.00 70.00 1937. 
14.0 75 2 "volkswagen rabbit" +19.0 6 232.0 90.00 3211. 17.0 75 1 "amc pacer" +23.0 4 115.0 95.00 2694. 15.0 75 2 "audi 100ls" +23.0 4 120.0 88.00 2957. 17.0 75 2 "peugeot 504" +22.0 4 121.0 98.00 2945. 14.5 75 2 "volvo 244dl" +25.0 4 121.0 115.0 2671. 13.5 75 2 "saab 99le" +33.0 4 91.00 53.00 1795. 17.5 75 3 "honda civic cvcc" +28.0 4 107.0 86.00 2464. 15.5 76 2 "fiat 131" +25.0 4 116.0 81.00 2220. 16.9 76 2 "opel 1900" +25.0 4 140.0 92.00 2572. 14.9 76 1 "capri ii" +26.0 4 98.00 79.00 2255. 17.7 76 1 "dodge colt" +27.0 4 101.0 83.00 2202. 15.3 76 2 "renault 12tl" +17.5 8 305.0 140.0 4215. 13.0 76 1 "chevrolet chevelle malibu classic" +16.0 8 318.0 150.0 4190. 13.0 76 1 "dodge coronet brougham" +15.5 8 304.0 120.0 3962. 13.9 76 1 "amc matador" +14.5 8 351.0 152.0 4215. 12.8 76 1 "ford gran torino" +22.0 6 225.0 100.0 3233. 15.4 76 1 "plymouth valiant" +22.0 6 250.0 105.0 3353. 14.5 76 1 "chevrolet nova" +24.0 6 200.0 81.00 3012. 17.6 76 1 "ford maverick" +22.5 6 232.0 90.00 3085. 17.6 76 1 "amc hornet" +29.0 4 85.00 52.00 2035. 22.2 76 1 "chevrolet chevette" +24.5 4 98.00 60.00 2164. 22.1 76 1 "chevrolet woody" +29.0 4 90.00 70.00 1937. 14.2 76 2 "vw rabbit" +33.0 4 91.00 53.00 1795. 17.4 76 3 "honda civic" +20.0 6 225.0 100.0 3651. 17.7 76 1 "dodge aspen se" +18.0 6 250.0 78.00 3574. 21.0 76 1 "ford granada ghia" +18.5 6 250.0 110.0 3645. 16.2 76 1 "pontiac ventura sj" +17.5 6 258.0 95.00 3193. 17.8 76 1 "amc pacer d/l" +29.5 4 97.00 71.00 1825. 12.2 76 2 "volkswagen rabbit" +32.0 4 85.00 70.00 1990. 17.0 76 3 "datsun b-210" +28.0 4 97.00 75.00 2155. 16.4 76 3 "toyota corolla" +26.5 4 140.0 72.00 2565. 13.6 76 1 "ford pinto" +20.0 4 130.0 102.0 3150. 15.7 76 2 "volvo 245" +13.0 8 318.0 150.0 3940. 13.2 76 1 "plymouth volare premier v8" +19.0 4 120.0 88.00 3270. 21.9 76 2 "peugeot 504" +19.0 6 156.0 108.0 2930. 15.5 76 3 "toyota mark ii" +16.5 6 168.0 120.0 3820. 16.7 76 2 "mercedes-benz 280s" +16.5 8 350.0 180.0 4380. 12.1 76 1 "cadillac seville" +13.0 8 350.0 145.0 4055. 12.0 76 1 "chevy c10" +13.0 8 302.0 130.0 3870. 15.0 76 1 "ford f108" +13.0 8 318.0 150.0 3755. 14.0 76 1 "dodge d100" +31.5 4 98.00 68.00 2045. 18.5 77 3 "honda accord cvcc" +30.0 4 111.0 80.00 2155. 14.8 77 1 "buick opel isuzu deluxe" +36.0 4 79.00 58.00 1825. 18.6 77 2 "renault 5 gtl" +25.5 4 122.0 96.00 2300. 15.5 77 1 "plymouth arrow gs" +33.5 4 85.00 70.00 1945. 16.8 77 3 "datsun f-10 hatchback" +17.5 8 305.0 145.0 3880. 12.5 77 1 "chevrolet caprice classic" +17.0 8 260.0 110.0 4060. 19.0 77 1 "oldsmobile cutlass supreme" +15.5 8 318.0 145.0 4140. 13.7 77 1 "dodge monaco brougham" +15.0 8 302.0 130.0 4295. 14.9 77 1 "mercury cougar brougham" +17.5 6 250.0 110.0 3520. 16.4 77 1 "chevrolet concours" +20.5 6 231.0 105.0 3425. 16.9 77 1 "buick skylark" +19.0 6 225.0 100.0 3630. 17.7 77 1 "plymouth volare custom" +18.5 6 250.0 98.00 3525. 19.0 77 1 "ford granada" +16.0 8 400.0 180.0 4220. 11.1 77 1 "pontiac grand prix lj" +15.5 8 350.0 170.0 4165. 11.4 77 1 "chevrolet monte carlo landau" +15.5 8 400.0 190.0 4325. 12.2 77 1 "chrysler cordoba" +16.0 8 351.0 149.0 4335. 14.5 77 1 "ford thunderbird" +29.0 4 97.00 78.00 1940. 14.5 77 2 "volkswagen rabbit custom" +24.5 4 151.0 88.00 2740. 16.0 77 1 "pontiac sunbird coupe" +26.0 4 97.00 75.00 2265. 18.2 77 3 "toyota corolla liftback" +25.5 4 140.0 89.00 2755. 15.8 77 1 "ford mustang ii 2+2" +30.5 4 98.00 63.00 2051. 17.0 77 1 "chevrolet chevette" +33.5 4 98.00 83.00 2075. 15.9 77 1 "dodge colt m/m" +30.0 4 97.00 67.00 1985. 16.4 77 3 "subaru dl" +30.5 4 97.00 78.00 2190. 
14.1 77 2 "volkswagen dasher" +22.0 6 146.0 97.00 2815. 14.5 77 3 "datsun 810" +21.5 4 121.0 110.0 2600. 12.8 77 2 "bmw 320i" +21.5 3 80.00 110.0 2720. 13.5 77 3 "mazda rx-4" +43.1 4 90.00 48.00 1985. 21.5 78 2 "volkswagen rabbit custom diesel" +36.1 4 98.00 66.00 1800. 14.4 78 1 "ford fiesta" +32.8 4 78.00 52.00 1985. 19.4 78 3 "mazda glc deluxe" +39.4 4 85.00 70.00 2070. 18.6 78 3 "datsun b210 gx" +36.1 4 91.00 60.00 1800. 16.4 78 3 "honda civic cvcc" +19.9 8 260.0 110.0 3365. 15.5 78 1 "oldsmobile cutlass salon brougham" +19.4 8 318.0 140.0 3735. 13.2 78 1 "dodge diplomat" +20.2 8 302.0 139.0 3570. 12.8 78 1 "mercury monarch ghia" +19.2 6 231.0 105.0 3535. 19.2 78 1 "pontiac phoenix lj" +20.5 6 200.0 95.00 3155. 18.2 78 1 "chevrolet malibu" +20.2 6 200.0 85.00 2965. 15.8 78 1 "ford fairmont (auto)" +25.1 4 140.0 88.00 2720. 15.4 78 1 "ford fairmont (man)" +20.5 6 225.0 100.0 3430. 17.2 78 1 "plymouth volare" +19.4 6 232.0 90.00 3210. 17.2 78 1 "amc concord" +20.6 6 231.0 105.0 3380. 15.8 78 1 "buick century special" +20.8 6 200.0 85.00 3070. 16.7 78 1 "mercury zephyr" +18.6 6 225.0 110.0 3620. 18.7 78 1 "dodge aspen" +18.1 6 258.0 120.0 3410. 15.1 78 1 "amc concord d/l" +19.2 8 305.0 145.0 3425. 13.2 78 1 "chevrolet monte carlo landau" +17.7 6 231.0 165.0 3445. 13.4 78 1 "buick regal sport coupe (turbo)" +18.1 8 302.0 139.0 3205. 11.2 78 1 "ford futura" +17.5 8 318.0 140.0 4080. 13.7 78 1 "dodge magnum xe" +30.0 4 98.00 68.00 2155. 16.5 78 1 "chevrolet chevette" +27.5 4 134.0 95.00 2560. 14.2 78 3 "toyota corona" +27.2 4 119.0 97.00 2300. 14.7 78 3 "datsun 510" +30.9 4 105.0 75.00 2230. 14.5 78 1 "dodge omni" +21.1 4 134.0 95.00 2515. 14.8 78 3 "toyota celica gt liftback" +23.2 4 156.0 105.0 2745. 16.7 78 1 "plymouth sapporo" +23.8 4 151.0 85.00 2855. 17.6 78 1 "oldsmobile starfire sx" +23.9 4 119.0 97.00 2405. 14.9 78 3 "datsun 200-sx" +20.3 5 131.0 103.0 2830. 15.9 78 2 "audi 5000" +17.0 6 163.0 125.0 3140. 13.6 78 2 "volvo 264gl" +21.6 4 121.0 115.0 2795. 15.7 78 2 "saab 99gle" +16.2 6 163.0 133.0 3410. 15.8 78 2 "peugeot 604sl" +31.5 4 89.00 71.00 1990. 14.9 78 2 "volkswagen scirocco" +29.5 4 98.00 68.00 2135. 16.6 78 3 "honda accord lx" +21.5 6 231.0 115.0 3245. 15.4 79 1 "pontiac lemans v6" +19.8 6 200.0 85.00 2990. 18.2 79 1 "mercury zephyr 6" +22.3 4 140.0 88.00 2890. 17.3 79 1 "ford fairmont 4" +20.2 6 232.0 90.00 3265. 18.2 79 1 "amc concord dl 6" +20.6 6 225.0 110.0 3360. 16.6 79 1 "dodge aspen 6" +17.0 8 305.0 130.0 3840. 15.4 79 1 "chevrolet caprice classic" +17.6 8 302.0 129.0 3725. 13.4 79 1 "ford ltd landau" +16.5 8 351.0 138.0 3955. 13.2 79 1 "mercury grand marquis" +18.2 8 318.0 135.0 3830. 15.2 79 1 "dodge st. regis" +16.9 8 350.0 155.0 4360. 14.9 79 1 "buick estate wagon (sw)" +15.5 8 351.0 142.0 4054. 14.3 79 1 "ford country squire (sw)" +19.2 8 267.0 125.0 3605. 15.0 79 1 "chevrolet malibu classic (sw)" +18.5 8 360.0 150.0 3940. 13.0 79 1 "chrysler lebaron town @ country (sw)" +31.9 4 89.00 71.00 1925. 14.0 79 2 "vw rabbit custom" +34.1 4 86.00 65.00 1975. 15.2 79 3 "maxda glc deluxe" +35.7 4 98.00 80.00 1915. 14.4 79 1 "dodge colt hatchback custom" +27.4 4 121.0 80.00 2670. 15.0 79 1 "amc spirit dl" +25.4 5 183.0 77.00 3530. 20.1 79 2 "mercedes benz 300d" +23.0 8 350.0 125.0 3900. 17.4 79 1 "cadillac eldorado" +27.2 4 141.0 71.00 3190. 24.8 79 2 "peugeot 504" +23.9 8 260.0 90.00 3420. 22.2 79 1 "oldsmobile cutlass salon brougham" +34.2 4 105.0 70.00 2200. 13.2 79 1 "plymouth horizon" +34.5 4 105.0 70.00 2150. 
14.9 79 1 "plymouth horizon tc3" +31.8 4 85.00 65.00 2020. 19.2 79 3 "datsun 210" +37.3 4 91.00 69.00 2130. 14.7 79 2 "fiat strada custom" +28.4 4 151.0 90.00 2670. 16.0 79 1 "buick skylark limited" +28.8 6 173.0 115.0 2595. 11.3 79 1 "chevrolet citation" +26.8 6 173.0 115.0 2700. 12.9 79 1 "oldsmobile omega brougham" +33.5 4 151.0 90.00 2556. 13.2 79 1 "pontiac phoenix" +41.5 4 98.00 76.00 2144. 14.7 80 2 "vw rabbit" +38.1 4 89.00 60.00 1968. 18.8 80 3 "toyota corolla tercel" +32.1 4 98.00 70.00 2120. 15.5 80 1 "chevrolet chevette" +37.2 4 86.00 65.00 2019. 16.4 80 3 "datsun 310" +28.0 4 151.0 90.00 2678. 16.5 80 1 "chevrolet citation" +26.4 4 140.0 88.00 2870. 18.1 80 1 "ford fairmont" +24.3 4 151.0 90.00 3003. 20.1 80 1 "amc concord" +19.1 6 225.0 90.00 3381. 18.7 80 1 "dodge aspen" +34.3 4 97.00 78.00 2188. 15.8 80 2 "audi 4000" +29.8 4 134.0 90.00 2711. 15.5 80 3 "toyota corona liftback" +31.3 4 120.0 75.00 2542. 17.5 80 3 "mazda 626" +37.0 4 119.0 92.00 2434. 15.0 80 3 "datsun 510 hatchback" +32.2 4 108.0 75.00 2265. 15.2 80 3 "toyota corolla" +46.6 4 86.00 65.00 2110. 17.9 80 3 "mazda glc" +27.9 4 156.0 105.0 2800. 14.4 80 1 "dodge colt" +40.8 4 85.00 65.00 2110. 19.2 80 3 "datsun 210" +44.3 4 90.00 48.00 2085. 21.7 80 2 "vw rabbit c (diesel)" +43.4 4 90.00 48.00 2335. 23.7 80 2 "vw dasher (diesel)" +36.4 5 121.0 67.00 2950. 19.9 80 2 "audi 5000s (diesel)" +30.0 4 146.0 67.00 3250. 21.8 80 2 "mercedes-benz 240d" +44.6 4 91.00 67.00 1850. 13.8 80 3 "honda civic 1500 gl" +40.9 4 85.00 ? 1835. 17.3 80 2 "renault lecar deluxe" +33.8 4 97.00 67.00 2145. 18.0 80 3 "subaru dl" +29.8 4 89.00 62.00 1845. 15.3 80 2 "vokswagen rabbit" +32.7 6 168.0 132.0 2910. 11.4 80 3 "datsun 280-zx" +23.7 3 70.00 100.0 2420. 12.5 80 3 "mazda rx-7 gs" +35.0 4 122.0 88.00 2500. 15.1 80 2 "triumph tr7 coupe" +23.6 4 140.0 ? 2905. 14.3 80 1 "ford mustang cobra" +32.4 4 107.0 72.00 2290. 17.0 80 3 "honda accord" +27.2 4 135.0 84.00 2490. 15.7 81 1 "plymouth reliant" +26.6 4 151.0 84.00 2635. 16.4 81 1 "buick skylark" +25.8 4 156.0 92.00 2620. 14.4 81 1 "dodge aries wagon (sw)" +23.5 6 173.0 110.0 2725. 12.6 81 1 "chevrolet citation" +30.0 4 135.0 84.00 2385. 12.9 81 1 "plymouth reliant" +39.1 4 79.00 58.00 1755. 16.9 81 3 "toyota starlet" +39.0 4 86.00 64.00 1875. 16.4 81 1 "plymouth champ" +35.1 4 81.00 60.00 1760. 16.1 81 3 "honda civic 1300" +32.3 4 97.00 67.00 2065. 17.8 81 3 "subaru" +37.0 4 85.00 65.00 1975. 19.4 81 3 "datsun 210 mpg" +37.7 4 89.00 62.00 2050. 17.3 81 3 "toyota tercel" +34.1 4 91.00 68.00 1985. 16.0 81 3 "mazda glc 4" +34.7 4 105.0 63.00 2215. 14.9 81 1 "plymouth horizon 4" +34.4 4 98.00 65.00 2045. 16.2 81 1 "ford escort 4w" +29.9 4 98.00 65.00 2380. 20.7 81 1 "ford escort 2h" +33.0 4 105.0 74.00 2190. 14.2 81 2 "volkswagen jetta" +34.5 4 100.0 ? 2320. 15.8 81 2 "renault 18i" +33.7 4 107.0 75.00 2210. 14.4 81 3 "honda prelude" +32.4 4 108.0 75.00 2350. 16.8 81 3 "toyota corolla" +32.9 4 119.0 100.0 2615. 14.8 81 3 "datsun 200sx" +31.6 4 120.0 74.00 2635. 18.3 81 3 "mazda 626" +28.1 4 141.0 80.00 3230. 20.4 81 2 "peugeot 505s turbo diesel" +30.7 6 145.0 76.00 3160. 19.6 81 2 "volvo diesel" +25.4 6 168.0 116.0 2900. 12.6 81 3 "toyota cressida" +24.2 6 146.0 120.0 2930. 13.8 81 3 "datsun 810 maxima" +22.4 6 231.0 110.0 3415. 15.8 81 1 "buick century" +26.6 8 350.0 105.0 3725. 19.0 81 1 "oldsmobile cutlass ls" +20.2 6 200.0 88.00 3060. 17.1 81 1 "ford granada gl" +17.6 6 225.0 85.00 3465. 16.6 81 1 "chrysler lebaron salon" +28.0 4 112.0 88.00 2605. 
19.6 82 1 "chevrolet cavalier" +27.0 4 112.0 88.00 2640. 18.6 82 1 "chevrolet cavalier wagon" +34.0 4 112.0 88.00 2395. 18.0 82 1 "chevrolet cavalier 2-door" +31.0 4 112.0 85.00 2575. 16.2 82 1 "pontiac j2000 se hatchback" +29.0 4 135.0 84.00 2525. 16.0 82 1 "dodge aries se" +27.0 4 151.0 90.00 2735. 18.0 82 1 "pontiac phoenix" +24.0 4 140.0 92.00 2865. 16.4 82 1 "ford fairmont futura" +23.0 4 151.0 ? 3035. 20.5 82 1 "amc concord dl" +36.0 4 105.0 74.00 1980. 15.3 82 2 "volkswagen rabbit l" +37.0 4 91.00 68.00 2025. 18.2 82 3 "mazda glc custom l" +31.0 4 91.00 68.00 1970. 17.6 82 3 "mazda glc custom" +38.0 4 105.0 63.00 2125. 14.7 82 1 "plymouth horizon miser" +36.0 4 98.00 70.00 2125. 17.3 82 1 "mercury lynx l" +36.0 4 120.0 88.00 2160. 14.5 82 3 "nissan stanza xe" +36.0 4 107.0 75.00 2205. 14.5 82 3 "honda accord" +34.0 4 108.0 70.00 2245 16.9 82 3 "toyota corolla" +38.0 4 91.00 67.00 1965. 15.0 82 3 "honda civic" +32.0 4 91.00 67.00 1965. 15.7 82 3 "honda civic (auto)" +38.0 4 91.00 67.00 1995. 16.2 82 3 "datsun 310 gx" +25.0 6 181.0 110.0 2945. 16.4 82 1 "buick century limited" +38.0 6 262.0 85.00 3015. 17.0 82 1 "oldsmobile cutlass ciera (diesel)" +26.0 4 156.0 92.00 2585. 14.5 82 1 "chrysler lebaron medallion" +22.0 6 232.0 112.0 2835 14.7 82 1 "ford granada l" +32.0 4 144.0 96.00 2665. 13.9 82 3 "toyota celica gt" +36.0 4 135.0 84.00 2370. 13.0 82 1 "dodge charger 2.2" +27.0 4 151.0 90.00 2950. 17.3 82 1 "chevrolet camaro" +27.0 4 140.0 86.00 2790. 15.6 82 1 "ford mustang gl" +44.0 4 97.00 52.00 2130. 24.6 82 2 "vw pickup" +32.0 4 135.0 84.00 2295. 11.6 82 1 "dodge rampage" +28.0 4 120.0 79.00 2625. 18.6 82 1 "ford ranger" +31.0 4 119.0 82.00 2720. 19.4 82 1 "chevy s-10" diff --git a/tf2.0/daily-minimum-temperatures-in-me.csv b/tf2.0/daily-minimum-temperatures-in-me.csv new file mode 100644 index 00000000..be45e5a8 --- /dev/null +++ b/tf2.0/daily-minimum-temperatures-in-me.csv @@ -0,0 +1,3654 @@ +"Date","Daily minimum temperatures in Melbourne, Australia, 1981-1990" +"1981-01-01",20.7 +"1981-01-02",17.9 +"1981-01-03",18.8 +"1981-01-04",14.6 +"1981-01-05",15.8 +"1981-01-06",15.8 +"1981-01-07",15.8 +"1981-01-08",17.4 +"1981-01-09",21.8 +"1981-01-10",20.0 +"1981-01-11",16.2 +"1981-01-12",13.3 +"1981-01-13",16.7 +"1981-01-14",21.5 +"1981-01-15",25.0 +"1981-01-16",20.7 +"1981-01-17",20.6 +"1981-01-18",24.8 +"1981-01-19",17.7 +"1981-01-20",15.5 +"1981-01-21",18.2 +"1981-01-22",12.1 +"1981-01-23",14.4 +"1981-01-24",16.0 +"1981-01-25",16.5 +"1981-01-26",18.7 +"1981-01-27",19.4 +"1981-01-28",17.2 +"1981-01-29",15.5 +"1981-01-30",15.1 +"1981-01-31",15.4 +"1981-02-01",15.3 +"1981-02-02",18.8 +"1981-02-03",21.9 +"1981-02-04",19.9 +"1981-02-05",16.6 +"1981-02-06",16.8 +"1981-02-07",14.6 +"1981-02-08",17.1 +"1981-02-09",25.0 +"1981-02-10",15.0 +"1981-02-11",13.7 +"1981-02-12",13.9 +"1981-02-13",18.3 +"1981-02-14",22.0 +"1981-02-15",22.1 +"1981-02-16",21.2 +"1981-02-17",18.4 +"1981-02-18",16.6 +"1981-02-19",16.1 +"1981-02-20",15.7 +"1981-02-21",16.6 +"1981-02-22",16.5 +"1981-02-23",14.4 +"1981-02-24",14.4 +"1981-02-25",18.5 +"1981-02-26",16.9 +"1981-02-27",17.5 +"1981-02-28",21.2 +"1981-03-01",17.8 +"1981-03-02",18.6 +"1981-03-03",17.0 +"1981-03-04",16.0 +"1981-03-05",13.3 +"1981-03-06",14.3 +"1981-03-07",11.4 +"1981-03-08",16.3 +"1981-03-09",16.1 +"1981-03-10",11.8 +"1981-03-11",12.2 +"1981-03-12",14.7 +"1981-03-13",11.8 +"1981-03-14",11.3 +"1981-03-15",10.6 +"1981-03-16",11.7 +"1981-03-17",14.2 +"1981-03-18",11.2 +"1981-03-19",16.9 +"1981-03-20",16.7 +"1981-03-21",8.1 +"1981-03-22",8.0 
+"1981-03-23",8.8 +"1981-03-24",13.4 +"1981-03-25",10.9 +"1981-03-26",13.4 +"1981-03-27",11.0 +"1981-03-28",15.0 +"1981-03-29",15.7 +"1981-03-30",14.5 +"1981-03-31",15.8 +"1981-04-01",16.7 +"1981-04-02",16.8 +"1981-04-03",17.5 +"1981-04-04",17.1 +"1981-04-05",18.1 +"1981-04-06",16.6 +"1981-04-07",10.0 +"1981-04-08",14.9 +"1981-04-09",15.9 +"1981-04-10",13.0 +"1981-04-11",7.6 +"1981-04-12",11.5 +"1981-04-13",13.5 +"1981-04-14",13.0 +"1981-04-15",13.3 +"1981-04-16",12.1 +"1981-04-17",12.4 +"1981-04-18",13.2 +"1981-04-19",13.8 +"1981-04-20",10.6 +"1981-04-21",9.0 +"1981-04-22",10.0 +"1981-04-23",9.8 +"1981-04-24",11.5 +"1981-04-25",8.9 +"1981-04-26",7.4 +"1981-04-27",9.9 +"1981-04-28",9.3 +"1981-04-29",9.9 +"1981-04-30",7.4 +"1981-05-01",8.6 +"1981-05-02",11.9 +"1981-05-03",14.0 +"1981-05-04",8.6 +"1981-05-05",10.0 +"1981-05-06",13.5 +"1981-05-07",12.0 +"1981-05-08",10.5 +"1981-05-09",10.7 +"1981-05-10",8.1 +"1981-05-11",10.1 +"1981-05-12",10.6 +"1981-05-13",5.3 +"1981-05-14",6.6 +"1981-05-15",8.5 +"1981-05-16",11.2 +"1981-05-17",9.8 +"1981-05-18",5.9 +"1981-05-19",3.2 +"1981-05-20",2.1 +"1981-05-21",3.4 +"1981-05-22",5.4 +"1981-05-23",9.6 +"1981-05-24",11.5 +"1981-05-25",12.3 +"1981-05-26",12.6 +"1981-05-27",11.0 +"1981-05-28",11.2 +"1981-05-29",11.4 +"1981-05-30",11.8 +"1981-05-31",12.8 +"1981-06-01",11.6 +"1981-06-02",10.6 +"1981-06-03",9.8 +"1981-06-04",11.2 +"1981-06-05",5.7 +"1981-06-06",7.1 +"1981-06-07",2.5 +"1981-06-08",3.5 +"1981-06-09",4.6 +"1981-06-10",11.0 +"1981-06-11",5.7 +"1981-06-12",7.7 +"1981-06-13",10.4 +"1981-06-14",11.4 +"1981-06-15",9.2 +"1981-06-16",6.1 +"1981-06-17",2.7 +"1981-06-18",4.3 +"1981-06-19",6.3 +"1981-06-20",3.8 +"1981-06-21",4.4 +"1981-06-22",7.1 +"1981-06-23",4.8 +"1981-06-24",5.8 +"1981-06-25",6.2 +"1981-06-26",7.3 +"1981-06-27",9.2 +"1981-06-28",10.2 +"1981-06-29",9.5 +"1981-06-30",9.5 +"1981-07-01",10.7 +"1981-07-02",10.0 +"1981-07-03",6.5 +"1981-07-04",7.0 +"1981-07-05",7.4 +"1981-07-06",8.1 +"1981-07-07",6.6 +"1981-07-08",8.3 +"1981-07-09",8.9 +"1981-07-10",4.6 +"1981-07-11",6.8 +"1981-07-12",5.7 +"1981-07-13",6.1 +"1981-07-14",7.0 +"1981-07-15",7.2 +"1981-07-16",6.3 +"1981-07-17",8.8 +"1981-07-18",5.0 +"1981-07-19",7.4 +"1981-07-20",10.1 +"1981-07-21",12.0 +"1981-07-22",9.0 +"1981-07-23",8.9 +"1981-07-24",9.8 +"1981-07-25",9.0 +"1981-07-26",9.2 +"1981-07-27",7.7 +"1981-07-28",8.0 +"1981-07-29",6.1 +"1981-07-30",3.5 +"1981-07-31",3.2 +"1981-08-01",5.7 +"1981-08-02",7.7 +"1981-08-03",9.0 +"1981-08-04",10.0 +"1981-08-05",6.2 +"1981-08-06",6.9 +"1981-08-07",6.5 +"1981-08-08",6.8 +"1981-08-09",7.0 +"1981-08-10",5.2 +"1981-08-11",3.0 +"1981-08-12",5.6 +"1981-08-13",7.9 +"1981-08-14",9.0 +"1981-08-15",8.6 +"1981-08-16",10.3 +"1981-08-17",10.5 +"1981-08-18",7.6 +"1981-08-19",9.7 +"1981-08-20",12.5 +"1981-08-21",7.4 +"1981-08-22",7.9 +"1981-08-23",3.9 +"1981-08-24",6.6 +"1981-08-25",4.6 +"1981-08-26",7.0 +"1981-08-27",6.0 +"1981-08-28",5.5 +"1981-08-29",8.1 +"1981-08-30",5.5 +"1981-08-31",6.2 +"1981-09-01",8.0 +"1981-09-02",10.3 +"1981-09-03",9.8 +"1981-09-04",9.6 +"1981-09-05",8.5 +"1981-09-06",7.5 +"1981-09-07",11.2 +"1981-09-08",14.6 +"1981-09-09",11.7 +"1981-09-10",7.8 +"1981-09-11",12.3 +"1981-09-12",10.1 +"1981-09-13",11.5 +"1981-09-14",7.3 +"1981-09-15",10.9 +"1981-09-16",14.1 +"1981-09-17",10.7 +"1981-09-18",16.9 +"1981-09-19",10.5 +"1981-09-20",6.5 +"1981-09-21",11.0 +"1981-09-22",6.3 +"1981-09-23",10.5 +"1981-09-24",7.2 +"1981-09-25",7.6 +"1981-09-26",10.7 +"1981-09-27",7.8 +"1981-09-28",9.6 +"1981-09-29",11.4 +"1981-09-30",12.4 +"1981-10-01",8.9 
+"1981-10-02",13.2 +"1981-10-03",8.6 +"1981-10-04",6.2 +"1981-10-05",11.4 +"1981-10-06",13.2 +"1981-10-07",14.3 +"1981-10-08",7.3 +"1981-10-09",12.9 +"1981-10-10",7.8 +"1981-10-11",6.2 +"1981-10-12",5.6 +"1981-10-13",10.0 +"1981-10-14",13.3 +"1981-10-15",8.3 +"1981-10-16",10.2 +"1981-10-17",8.6 +"1981-10-18",7.3 +"1981-10-19",10.4 +"1981-10-20",11.2 +"1981-10-21",13.2 +"1981-10-22",11.4 +"1981-10-23",9.1 +"1981-10-24",6.6 +"1981-10-25",8.4 +"1981-10-26",9.7 +"1981-10-27",13.2 +"1981-10-28",12.5 +"1981-10-29",11.0 +"1981-10-30",11.0 +"1981-10-31",11.7 +"1981-11-01",9.2 +"1981-11-02",11.5 +"1981-11-03",13.6 +"1981-11-04",13.7 +"1981-11-05",10.4 +"1981-11-06",11.5 +"1981-11-07",7.6 +"1981-11-08",9.6 +"1981-11-09",14.2 +"1981-11-10",15.7 +"1981-11-11",10.5 +"1981-11-12",10.5 +"1981-11-13",9.7 +"1981-11-14",9.5 +"1981-11-15",11.3 +"1981-11-16",8.9 +"1981-11-17",9.4 +"1981-11-18",11.9 +"1981-11-19",11.7 +"1981-11-20",13.4 +"1981-11-21",12.6 +"1981-11-22",10.1 +"1981-11-23",15.8 +"1981-11-24",13.6 +"1981-11-25",11.9 +"1981-11-26",9.9 +"1981-11-27",12.6 +"1981-11-28",17.8 +"1981-11-29",15.0 +"1981-11-30",13.6 +"1981-12-01",13.4 +"1981-12-02",10.5 +"1981-12-03",14.2 +"1981-12-04",11.5 +"1981-12-05",13.0 +"1981-12-06",15.0 +"1981-12-07",14.7 +"1981-12-08",12.6 +"1981-12-09",12.5 +"1981-12-10",13.5 +"1981-12-11",14.8 +"1981-12-12",17.2 +"1981-12-13",9.7 +"1981-12-14",12.1 +"1981-12-15",12.8 +"1981-12-16",11.2 +"1981-12-17",16.4 +"1981-12-18",15.6 +"1981-12-19",13.3 +"1981-12-20",11.0 +"1981-12-21",11.1 +"1981-12-22",15.0 +"1981-12-23",12.8 +"1981-12-24",15.0 +"1981-12-25",14.2 +"1981-12-26",14.0 +"1981-12-27",15.5 +"1981-12-28",13.3 +"1981-12-29",15.6 +"1981-12-30",15.2 +"1981-12-31",17.4 +"1982-01-01",17.0 +"1982-01-02",15.0 +"1982-01-03",13.5 +"1982-01-04",15.2 +"1982-01-05",13.0 +"1982-01-06",12.5 +"1982-01-07",14.1 +"1982-01-08",14.8 +"1982-01-09",16.2 +"1982-01-10",15.8 +"1982-01-11",19.1 +"1982-01-12",22.2 +"1982-01-13",15.9 +"1982-01-14",13.0 +"1982-01-15",14.1 +"1982-01-16",15.8 +"1982-01-17",24.0 +"1982-01-18",18.0 +"1982-01-19",19.7 +"1982-01-20",25.2 +"1982-01-21",20.5 +"1982-01-22",19.3 +"1982-01-23",15.8 +"1982-01-24",17.0 +"1982-01-25",18.4 +"1982-01-26",13.3 +"1982-01-27",14.6 +"1982-01-28",12.5 +"1982-01-29",17.0 +"1982-01-30",17.1 +"1982-01-31",14.0 +"1982-02-01",14.6 +"1982-02-02",13.3 +"1982-02-03",14.8 +"1982-02-04",15.1 +"1982-02-05",13.1 +"1982-02-06",13.6 +"1982-02-07",19.5 +"1982-02-08",22.7 +"1982-02-09",17.2 +"1982-02-10",13.5 +"1982-02-11",15.4 +"1982-02-12",17.0 +"1982-02-13",19.2 +"1982-02-14",22.8 +"1982-02-15",26.3 +"1982-02-16",18.2 +"1982-02-17",17.0 +"1982-02-18",14.8 +"1982-02-19",12.8 +"1982-02-20",15.5 +"1982-02-21",15.6 +"1982-02-22",13.1 +"1982-02-23",15.2 +"1982-02-24",14.1 +"1982-02-25",12.5 +"1982-02-26",14.6 +"1982-02-27",10.4 +"1982-02-28",13.9 +"1982-03-01",11.9 +"1982-03-02",13.5 +"1982-03-03",9.8 +"1982-03-04",14.0 +"1982-03-05",21.5 +"1982-03-06",19.5 +"1982-03-07",16.7 +"1982-03-08",19.1 +"1982-03-09",11.0 +"1982-03-10",9.0 +"1982-03-11",10.0 +"1982-03-12",14.6 +"1982-03-13",12.5 +"1982-03-14",17.2 +"1982-03-15",19.2 +"1982-03-16",22.2 +"1982-03-17",15.7 +"1982-03-18",14.2 +"1982-03-19",9.8 +"1982-03-20",14.0 +"1982-03-21",17.5 +"1982-03-22",20.7 +"1982-03-23",15.6 +"1982-03-24",13.2 +"1982-03-25",14.5 +"1982-03-26",16.8 +"1982-03-27",17.2 +"1982-03-28",13.4 +"1982-03-29",14.2 +"1982-03-30",14.3 +"1982-03-31",10.2 +"1982-04-01",10.4 +"1982-04-02",12.3 +"1982-04-03",11.9 +"1982-04-04",11.2 +"1982-04-05",8.5 +"1982-04-06",12.0 +"1982-04-07",12.4 
+"1982-04-08",12.9 +"1982-04-09",10.1 +"1982-04-10",15.0 +"1982-04-11",13.6 +"1982-04-12",12.4 +"1982-04-13",13.6 +"1982-04-14",16.1 +"1982-04-15",19.5 +"1982-04-16",14.2 +"1982-04-17",9.3 +"1982-04-18",10.1 +"1982-04-19",7.4 +"1982-04-20",8.6 +"1982-04-21",7.8 +"1982-04-22",9.1 +"1982-04-23",13.0 +"1982-04-24",16.5 +"1982-04-25",12.9 +"1982-04-26",6.9 +"1982-04-27",6.9 +"1982-04-28",8.7 +"1982-04-29",10.0 +"1982-04-30",10.8 +"1982-05-01",7.5 +"1982-05-02",6.3 +"1982-05-03",11.9 +"1982-05-04",13.8 +"1982-05-05",11.8 +"1982-05-06",11.0 +"1982-05-07",10.1 +"1982-05-08",8.5 +"1982-05-09",5.5 +"1982-05-10",7.6 +"1982-05-11",8.7 +"1982-05-12",10.8 +"1982-05-13",11.2 +"1982-05-14",9.1 +"1982-05-15",3.7 +"1982-05-16",4.6 +"1982-05-17",6.6 +"1982-05-18",13.2 +"1982-05-19",15.2 +"1982-05-20",7.6 +"1982-05-21",8.4 +"1982-05-22",6.0 +"1982-05-23",8.3 +"1982-05-24",8.6 +"1982-05-25",11.1 +"1982-05-26",12.1 +"1982-05-27",12.9 +"1982-05-28",14.0 +"1982-05-29",12.5 +"1982-05-30",11.5 +"1982-05-31",7.0 +"1982-06-01",7.1 +"1982-06-02",9.0 +"1982-06-03",3.1 +"1982-06-04",2.5 +"1982-06-05",0.0 +"1982-06-06",1.6 +"1982-06-07",2.6 +"1982-06-08",5.7 +"1982-06-09",2.3 +"1982-06-10",4.5 +"1982-06-11",8.2 +"1982-06-12",6.9 +"1982-06-13",7.3 +"1982-06-14",6.0 +"1982-06-15",7.3 +"1982-06-16",7.6 +"1982-06-17",8.0 +"1982-06-18",8.0 +"1982-06-19",6.8 +"1982-06-20",7.3 +"1982-06-21",6.2 +"1982-06-22",6.9 +"1982-06-23",8.9 +"1982-06-24",4.0 +"1982-06-25",1.3 +"1982-06-26",0.8 +"1982-06-27",4.3 +"1982-06-28",7.3 +"1982-06-29",7.7 +"1982-06-30",9.0 +"1982-07-01",4.2 +"1982-07-02",1.6 +"1982-07-03",2.6 +"1982-07-04",3.4 +"1982-07-05",3.9 +"1982-07-06",7.0 +"1982-07-07",7.8 +"1982-07-08",5.3 +"1982-07-09",2.4 +"1982-07-10",2.8 +"1982-07-11",4.0 +"1982-07-12",7.5 +"1982-07-13",7.8 +"1982-07-14",5.6 +"1982-07-15",3.3 +"1982-07-16",5.0 +"1982-07-17",3.7 +"1982-07-18",3.9 +"1982-07-19",5.2 +"1982-07-20",?0.2 +"1982-07-21",?0.8 +"1982-07-22",0.9 +"1982-07-23",3.5 +"1982-07-24",6.6 +"1982-07-25",9.5 +"1982-07-26",9.0 +"1982-07-27",3.5 +"1982-07-28",4.5 +"1982-07-29",5.7 +"1982-07-30",5.6 +"1982-07-31",7.1 +"1982-08-01",9.7 +"1982-08-02",8.3 +"1982-08-03",9.1 +"1982-08-04",2.8 +"1982-08-05",2.2 +"1982-08-06",4.5 +"1982-08-07",3.8 +"1982-08-08",3.8 +"1982-08-09",6.2 +"1982-08-10",11.5 +"1982-08-11",10.2 +"1982-08-12",7.9 +"1982-08-13",9.0 +"1982-08-14",9.5 +"1982-08-15",6.0 +"1982-08-16",8.2 +"1982-08-17",9.2 +"1982-08-18",4.3 +"1982-08-19",6.6 +"1982-08-20",9.4 +"1982-08-21",13.2 +"1982-08-22",6.6 +"1982-08-23",5.1 +"1982-08-24",12.1 +"1982-08-25",11.2 +"1982-08-26",8.5 +"1982-08-27",4.6 +"1982-08-28",7.0 +"1982-08-29",14.2 +"1982-08-30",12.7 +"1982-08-31",7.6 +"1982-09-01",4.0 +"1982-09-02",10.0 +"1982-09-03",10.5 +"1982-09-04",5.0 +"1982-09-05",4.5 +"1982-09-06",8.2 +"1982-09-07",4.3 +"1982-09-08",9.8 +"1982-09-09",5.8 +"1982-09-10",5.0 +"1982-09-11",8.5 +"1982-09-12",9.0 +"1982-09-13",3.6 +"1982-09-14",6.7 +"1982-09-15",6.7 +"1982-09-16",10.1 +"1982-09-17",15.0 +"1982-09-18",8.9 +"1982-09-19",5.7 +"1982-09-20",4.2 +"1982-09-21",4.0 +"1982-09-22",5.3 +"1982-09-23",6.3 +"1982-09-24",8.5 +"1982-09-25",11.5 +"1982-09-26",7.7 +"1982-09-27",9.2 +"1982-09-28",7.8 +"1982-09-29",6.3 +"1982-09-30",6.3 +"1982-10-01",8.6 +"1982-10-02",6.1 +"1982-10-03",13.2 +"1982-10-04",9.9 +"1982-10-05",4.7 +"1982-10-06",5.8 +"1982-10-07",14.9 +"1982-10-08",10.7 +"1982-10-09",8.6 +"1982-10-10",9.4 +"1982-10-11",5.7 +"1982-10-12",10.9 +"1982-10-13",13.1 +"1982-10-14",10.4 +"1982-10-15",8.2 +"1982-10-16",9.8 +"1982-10-17",7.5 +"1982-10-18",5.8 
+"1982-10-19",9.8 +"1982-10-20",7.9 +"1982-10-21",8.7 +"1982-10-22",10.0 +"1982-10-23",10.6 +"1982-10-24",8.0 +"1982-10-25",10.2 +"1982-10-26",15.1 +"1982-10-27",13.9 +"1982-10-28",9.2 +"1982-10-29",9.0 +"1982-10-30",13.2 +"1982-10-31",7.0 +"1982-11-01",10.6 +"1982-11-02",6.9 +"1982-11-03",9.5 +"1982-11-04",12.5 +"1982-11-05",13.6 +"1982-11-06",17.7 +"1982-11-07",16.0 +"1982-11-08",11.3 +"1982-11-09",10.5 +"1982-11-10",14.4 +"1982-11-11",10.3 +"1982-11-12",9.0 +"1982-11-13",11.1 +"1982-11-14",14.5 +"1982-11-15",18.0 +"1982-11-16",12.8 +"1982-11-17",10.7 +"1982-11-18",9.1 +"1982-11-19",8.7 +"1982-11-20",12.4 +"1982-11-21",12.6 +"1982-11-22",10.3 +"1982-11-23",13.7 +"1982-11-24",16.0 +"1982-11-25",15.8 +"1982-11-26",12.1 +"1982-11-27",12.5 +"1982-11-28",12.2 +"1982-11-29",13.7 +"1982-11-30",16.1 +"1982-12-01",15.5 +"1982-12-02",10.3 +"1982-12-03",10.5 +"1982-12-04",11.0 +"1982-12-05",11.9 +"1982-12-06",13.0 +"1982-12-07",12.2 +"1982-12-08",10.6 +"1982-12-09",13.0 +"1982-12-10",13.0 +"1982-12-11",12.2 +"1982-12-12",12.6 +"1982-12-13",18.7 +"1982-12-14",15.2 +"1982-12-15",15.3 +"1982-12-16",13.9 +"1982-12-17",15.8 +"1982-12-18",13.0 +"1982-12-19",13.0 +"1982-12-20",13.7 +"1982-12-21",12.0 +"1982-12-22",10.8 +"1982-12-23",15.6 +"1982-12-24",15.3 +"1982-12-25",13.9 +"1982-12-26",13.0 +"1982-12-27",15.3 +"1982-12-28",16.3 +"1982-12-29",15.8 +"1982-12-30",17.7 +"1982-12-31",16.3 +"1983-01-01",18.4 +"1983-01-02",15.0 +"1983-01-03",10.9 +"1983-01-04",11.4 +"1983-01-05",14.8 +"1983-01-06",12.1 +"1983-01-07",12.8 +"1983-01-08",16.2 +"1983-01-09",15.5 +"1983-01-10",13.0 +"1983-01-11",10.5 +"1983-01-12",9.1 +"1983-01-13",10.5 +"1983-01-14",11.8 +"1983-01-15",12.7 +"1983-01-16",12.7 +"1983-01-17",11.5 +"1983-01-18",13.8 +"1983-01-19",13.3 +"1983-01-20",11.6 +"1983-01-21",15.4 +"1983-01-22",12.4 +"1983-01-23",16.9 +"1983-01-24",14.7 +"1983-01-25",10.6 +"1983-01-26",15.6 +"1983-01-27",10.7 +"1983-01-28",12.6 +"1983-01-29",13.8 +"1983-01-30",14.3 +"1983-01-31",14.0 +"1983-02-01",18.1 +"1983-02-02",17.3 +"1983-02-03",13.0 +"1983-02-04",16.0 +"1983-02-05",14.9 +"1983-02-06",16.2 +"1983-02-07",20.3 +"1983-02-08",22.5 +"1983-02-09",17.2 +"1983-02-10",15.9 +"1983-02-11",16.8 +"1983-02-12",13.8 +"1983-02-13",12.8 +"1983-02-14",14.0 +"1983-02-15",17.5 +"1983-02-16",21.5 +"1983-02-17",16.8 +"1983-02-18",13.6 +"1983-02-19",14.5 +"1983-02-20",14.2 +"1983-02-21",15.7 +"1983-02-22",19.7 +"1983-02-23",17.4 +"1983-02-24",14.4 +"1983-02-25",16.9 +"1983-02-26",19.1 +"1983-02-27",20.4 +"1983-02-28",20.1 +"1983-03-01",19.9 +"1983-03-02",22.0 +"1983-03-03",20.5 +"1983-03-04",22.1 +"1983-03-05",20.6 +"1983-03-06",15.0 +"1983-03-07",20.6 +"1983-03-08",21.5 +"1983-03-09",16.2 +"1983-03-10",14.1 +"1983-03-11",14.5 +"1983-03-12",21.1 +"1983-03-13",15.9 +"1983-03-14",15.2 +"1983-03-15",13.1 +"1983-03-16",13.2 +"1983-03-17",12.5 +"1983-03-18",15.2 +"1983-03-19",17.6 +"1983-03-20",15.5 +"1983-03-21",16.7 +"1983-03-22",16.3 +"1983-03-23",15.1 +"1983-03-24",12.7 +"1983-03-25",10.0 +"1983-03-26",11.4 +"1983-03-27",12.6 +"1983-03-28",10.7 +"1983-03-29",10.0 +"1983-03-30",13.9 +"1983-03-31",13.4 +"1983-04-01",12.5 +"1983-04-02",12.8 +"1983-04-03",7.8 +"1983-04-04",11.1 +"1983-04-05",10.7 +"1983-04-06",7.1 +"1983-04-07",6.7 +"1983-04-08",5.7 +"1983-04-09",9.1 +"1983-04-10",15.2 +"1983-04-11",15.5 +"1983-04-12",11.1 +"1983-04-13",11.7 +"1983-04-14",11.5 +"1983-04-15",9.8 +"1983-04-16",6.2 +"1983-04-17",6.7 +"1983-04-18",7.5 +"1983-04-19",8.8 +"1983-04-20",8.0 +"1983-04-21",10.4 +"1983-04-22",14.5 +"1983-04-23",16.5 +"1983-04-24",14.1 
+"1983-04-25",10.5 +"1983-04-26",12.6 +"1983-04-27",13.0 +"1983-04-28",8.7 +"1983-04-29",10.1 +"1983-04-30",12.0 +"1983-05-01",12.5 +"1983-05-02",13.5 +"1983-05-03",13.7 +"1983-05-04",13.5 +"1983-05-05",10.7 +"1983-05-06",13.0 +"1983-05-07",11.6 +"1983-05-08",13.0 +"1983-05-09",11.2 +"1983-05-10",13.5 +"1983-05-11",12.9 +"1983-05-12",6.8 +"1983-05-13",10.0 +"1983-05-14",14.5 +"1983-05-15",11.7 +"1983-05-16",6.7 +"1983-05-17",4.6 +"1983-05-18",4.9 +"1983-05-19",7.4 +"1983-05-20",8.3 +"1983-05-21",7.5 +"1983-05-22",6.2 +"1983-05-23",7.8 +"1983-05-24",13.2 +"1983-05-25",11.9 +"1983-05-26",6.5 +"1983-05-27",8.3 +"1983-05-28",12.1 +"1983-05-29",9.3 +"1983-05-30",7.5 +"1983-05-31",9.3 +"1983-06-01",11.0 +"1983-06-02",10.8 +"1983-06-03",5.3 +"1983-06-04",7.6 +"1983-06-05",5.6 +"1983-06-06",7.2 +"1983-06-07",9.6 +"1983-06-08",7.0 +"1983-06-09",8.3 +"1983-06-10",7.8 +"1983-06-11",4.7 +"1983-06-12",6.8 +"1983-06-13",7.2 +"1983-06-14",8.3 +"1983-06-15",9.5 +"1983-06-16",4.7 +"1983-06-17",3.0 +"1983-06-18",1.5 +"1983-06-19",2.5 +"1983-06-20",6.2 +"1983-06-21",11.6 +"1983-06-22",6.6 +"1983-06-23",6.6 +"1983-06-24",8.0 +"1983-06-25",7.9 +"1983-06-26",3.3 +"1983-06-27",3.9 +"1983-06-28",6.0 +"1983-06-29",4.0 +"1983-06-30",5.5 +"1983-07-01",8.5 +"1983-07-02",9.8 +"1983-07-03",9.5 +"1983-07-04",7.2 +"1983-07-05",8.1 +"1983-07-06",8.0 +"1983-07-07",8.5 +"1983-07-08",8.8 +"1983-07-09",8.3 +"1983-07-10",2.4 +"1983-07-11",4.9 +"1983-07-12",5.9 +"1983-07-13",6.7 +"1983-07-14",8.4 +"1983-07-15",6.5 +"1983-07-16",7.9 +"1983-07-17",4.1 +"1983-07-18",5.4 +"1983-07-19",7.5 +"1983-07-20",3.9 +"1983-07-21",2.5 +"1983-07-22",5.3 +"1983-07-23",6.6 +"1983-07-24",0.0 +"1983-07-25",0.7 +"1983-07-26",7.6 +"1983-07-27",12.3 +"1983-07-28",9.2 +"1983-07-29",9.6 +"1983-07-30",9.5 +"1983-07-31",10.0 +"1983-08-01",7.7 +"1983-08-02",8.0 +"1983-08-03",8.3 +"1983-08-04",8.3 +"1983-08-05",4.5 +"1983-08-06",6.5 +"1983-08-07",9.4 +"1983-08-08",9.4 +"1983-08-09",10.5 +"1983-08-10",10.7 +"1983-08-11",9.9 +"1983-08-12",7.6 +"1983-08-13",5.8 +"1983-08-14",8.5 +"1983-08-15",13.8 +"1983-08-16",14.3 +"1983-08-17",8.3 +"1983-08-18",5.3 +"1983-08-19",3.0 +"1983-08-20",5.2 +"1983-08-21",10.3 +"1983-08-22",11.1 +"1983-08-23",10.5 +"1983-08-24",9.0 +"1983-08-25",13.0 +"1983-08-26",6.4 +"1983-08-27",8.4 +"1983-08-28",6.7 +"1983-08-29",8.3 +"1983-08-30",11.2 +"1983-08-31",10.0 +"1983-09-01",10.1 +"1983-09-02",10.6 +"1983-09-03",10.9 +"1983-09-04",5.7 +"1983-09-05",9.5 +"1983-09-06",10.4 +"1983-09-07",11.1 +"1983-09-08",12.2 +"1983-09-09",10.6 +"1983-09-10",8.8 +"1983-09-11",9.2 +"1983-09-12",5.5 +"1983-09-13",7.1 +"1983-09-14",6.5 +"1983-09-15",4.3 +"1983-09-16",5.0 +"1983-09-17",11.2 +"1983-09-18",7.5 +"1983-09-19",12.0 +"1983-09-20",13.6 +"1983-09-21",8.3 +"1983-09-22",8.5 +"1983-09-23",12.9 +"1983-09-24",7.7 +"1983-09-25",7.6 +"1983-09-26",3.5 +"1983-09-27",10.4 +"1983-09-28",15.4 +"1983-09-29",10.6 +"1983-09-30",9.6 +"1983-10-01",9.3 +"1983-10-02",13.9 +"1983-10-03",7.7 +"1983-10-04",9.5 +"1983-10-05",7.6 +"1983-10-06",6.9 +"1983-10-07",6.8 +"1983-10-08",5.8 +"1983-10-09",6.0 +"1983-10-10",8.3 +"1983-10-11",9.1 +"1983-10-12",12.5 +"1983-10-13",13.2 +"1983-10-14",16.2 +"1983-10-15",12.5 +"1983-10-16",11.8 +"1983-10-17",10.6 +"1983-10-18",10.0 +"1983-10-19",12.2 +"1983-10-20",8.9 +"1983-10-21",10.3 +"1983-10-22",7.5 +"1983-10-23",11.6 +"1983-10-24",12.6 +"1983-10-25",12.9 +"1983-10-26",11.7 +"1983-10-27",14.0 +"1983-10-28",12.3 +"1983-10-29",9.0 +"1983-10-30",9.2 +"1983-10-31",9.8 +"1983-11-01",11.8 +"1983-11-02",10.6 +"1983-11-03",12.6 
+"1983-11-04",11.0 +"1983-11-05",8.2 +"1983-11-06",7.5 +"1983-11-07",13.6 +"1983-11-08",14.8 +"1983-11-09",10.9 +"1983-11-10",7.7 +"1983-11-11",10.2 +"1983-11-12",10.8 +"1983-11-13",10.8 +"1983-11-14",12.5 +"1983-11-15",13.2 +"1983-11-16",8.7 +"1983-11-17",5.7 +"1983-11-18",9.8 +"1983-11-19",7.3 +"1983-11-20",10.8 +"1983-11-21",10.0 +"1983-11-22",16.2 +"1983-11-23",15.0 +"1983-11-24",14.5 +"1983-11-25",15.9 +"1983-11-26",14.9 +"1983-11-27",14.2 +"1983-11-28",15.8 +"1983-11-29",17.2 +"1983-11-30",17.6 +"1983-12-01",12.1 +"1983-12-02",11.4 +"1983-12-03",13.0 +"1983-12-04",13.2 +"1983-12-05",12.0 +"1983-12-06",15.3 +"1983-12-07",12.7 +"1983-12-08",12.1 +"1983-12-09",13.8 +"1983-12-10",10.9 +"1983-12-11",12.0 +"1983-12-12",16.5 +"1983-12-13",15.0 +"1983-12-14",11.2 +"1983-12-15",13.9 +"1983-12-16",15.0 +"1983-12-17",14.8 +"1983-12-18",15.0 +"1983-12-19",13.3 +"1983-12-20",20.4 +"1983-12-21",18.0 +"1983-12-22",12.2 +"1983-12-23",16.7 +"1983-12-24",13.8 +"1983-12-25",17.5 +"1983-12-26",15.0 +"1983-12-27",13.9 +"1983-12-28",11.1 +"1983-12-29",16.1 +"1983-12-30",20.4 +"1983-12-31",18.0 +"1984-01-01",19.5 +"1984-01-02",17.1 +"1984-01-03",17.1 +"1984-01-04",12.0 +"1984-01-05",11.0 +"1984-01-06",16.3 +"1984-01-07",16.1 +"1984-01-08",13.0 +"1984-01-09",13.4 +"1984-01-10",15.2 +"1984-01-11",12.5 +"1984-01-12",14.3 +"1984-01-13",16.5 +"1984-01-14",18.6 +"1984-01-15",18.0 +"1984-01-16",18.2 +"1984-01-17",11.4 +"1984-01-18",11.9 +"1984-01-19",12.2 +"1984-01-20",14.8 +"1984-01-21",13.1 +"1984-01-22",12.7 +"1984-01-23",10.5 +"1984-01-24",13.8 +"1984-01-25",18.8 +"1984-01-26",13.9 +"1984-01-27",11.2 +"1984-01-28",10.6 +"1984-01-29",14.7 +"1984-01-30",13.1 +"1984-01-31",12.1 +"1984-02-01",14.7 +"1984-02-02",11.1 +"1984-02-03",13.0 +"1984-02-04",15.6 +"1984-02-05",14.2 +"1984-02-06",15.5 +"1984-02-07",18.0 +"1984-02-08",15.0 +"1984-02-09",15.9 +"1984-02-10",15.5 +"1984-02-11",15.8 +"1984-02-12",16.6 +"1984-02-13",13.6 +"1984-02-14",13.8 +"1984-02-15",14.6 +"1984-02-16",15.6 +"1984-02-17",16.6 +"1984-02-18",14.3 +"1984-02-19",16.3 +"1984-02-20",18.9 +"1984-02-21",18.7 +"1984-02-22",14.5 +"1984-02-23",16.5 +"1984-02-24",14.1 +"1984-02-25",13.5 +"1984-02-26",11.7 +"1984-02-27",15.1 +"1984-02-28",11.2 +"1984-02-29",13.5 +"1984-03-01",12.6 +"1984-03-02",8.8 +"1984-03-03",10.5 +"1984-03-04",12.1 +"1984-03-05",14.5 +"1984-03-06",19.5 +"1984-03-07",14.0 +"1984-03-08",13.8 +"1984-03-09",10.5 +"1984-03-10",13.8 +"1984-03-11",11.4 +"1984-03-12",15.6 +"1984-03-13",11.1 +"1984-03-14",12.1 +"1984-03-15",14.2 +"1984-03-16",10.9 +"1984-03-17",14.2 +"1984-03-18",13.8 +"1984-03-19",15.1 +"1984-03-20",14.0 +"1984-03-21",12.1 +"1984-03-22",13.8 +"1984-03-23",16.6 +"1984-03-24",17.8 +"1984-03-25",9.4 +"1984-03-26",10.2 +"1984-03-27",7.4 +"1984-03-28",8.7 +"1984-03-29",14.0 +"1984-03-30",15.3 +"1984-03-31",11.1 +"1984-04-01",9.7 +"1984-04-02",10.3 +"1984-04-03",9.2 +"1984-04-04",8.2 +"1984-04-05",9.7 +"1984-04-06",12.4 +"1984-04-07",12.5 +"1984-04-08",9.0 +"1984-04-09",9.7 +"1984-04-10",10.1 +"1984-04-11",11.2 +"1984-04-12",12.0 +"1984-04-13",11.1 +"1984-04-14",10.8 +"1984-04-15",12.8 +"1984-04-16",9.8 +"1984-04-17",13.7 +"1984-04-18",11.0 +"1984-04-19",13.2 +"1984-04-20",13.0 +"1984-04-21",10.2 +"1984-04-22",13.2 +"1984-04-23",9.3 +"1984-04-24",11.1 +"1984-04-25",10.3 +"1984-04-26",8.7 +"1984-04-27",11.7 +"1984-04-28",12.5 +"1984-04-29",6.5 +"1984-04-30",9.6 +"1984-05-01",13.8 +"1984-05-02",14.7 +"1984-05-03",9.1 +"1984-05-04",4.8 +"1984-05-05",3.3 +"1984-05-06",3.5 +"1984-05-07",5.7 +"1984-05-08",5.5 +"1984-05-09",7.0 
+"1984-05-10",9.5 +"1984-05-11",9.9 +"1984-05-12",4.9 +"1984-05-13",6.3 +"1984-05-14",4.8 +"1984-05-15",6.2 +"1984-05-16",7.1 +"1984-05-17",7.5 +"1984-05-18",9.4 +"1984-05-19",8.7 +"1984-05-20",9.5 +"1984-05-21",12.1 +"1984-05-22",9.5 +"1984-05-23",9.3 +"1984-05-24",8.5 +"1984-05-25",8.0 +"1984-05-26",9.8 +"1984-05-27",6.2 +"1984-05-28",7.3 +"1984-05-29",10.9 +"1984-05-30",10.0 +"1984-05-31",8.7 +"1984-06-01",9.0 +"1984-06-02",10.8 +"1984-06-03",12.4 +"1984-06-04",7.2 +"1984-06-05",7.2 +"1984-06-06",11.1 +"1984-06-07",9.3 +"1984-06-08",10.1 +"1984-06-09",3.9 +"1984-06-10",5.0 +"1984-06-11",8.2 +"1984-06-12",2.8 +"1984-06-13",4.3 +"1984-06-14",8.1 +"1984-06-15",11.1 +"1984-06-16",4.7 +"1984-06-17",5.3 +"1984-06-18",10.0 +"1984-06-19",5.6 +"1984-06-20",2.2 +"1984-06-21",7.1 +"1984-06-22",8.3 +"1984-06-23",8.6 +"1984-06-24",10.1 +"1984-06-25",8.3 +"1984-06-26",7.2 +"1984-06-27",7.7 +"1984-06-28",7.8 +"1984-06-29",9.1 +"1984-06-30",9.4 +"1984-07-01",7.8 +"1984-07-02",2.6 +"1984-07-03",2.4 +"1984-07-04",3.9 +"1984-07-05",1.3 +"1984-07-06",2.1 +"1984-07-07",7.4 +"1984-07-08",7.2 +"1984-07-09",8.8 +"1984-07-10",8.9 +"1984-07-11",8.8 +"1984-07-12",8.0 +"1984-07-13",0.7 +"1984-07-14",?0.1 +"1984-07-15",0.9 +"1984-07-16",7.8 +"1984-07-17",7.2 +"1984-07-18",8.0 +"1984-07-19",4.6 +"1984-07-20",5.2 +"1984-07-21",5.8 +"1984-07-22",6.8 +"1984-07-23",8.1 +"1984-07-24",7.5 +"1984-07-25",5.4 +"1984-07-26",4.6 +"1984-07-27",6.4 +"1984-07-28",9.7 +"1984-07-29",7.0 +"1984-07-30",10.0 +"1984-07-31",10.6 +"1984-08-01",11.5 +"1984-08-02",10.2 +"1984-08-03",11.1 +"1984-08-04",11.0 +"1984-08-05",8.9 +"1984-08-06",9.9 +"1984-08-07",11.7 +"1984-08-08",11.6 +"1984-08-09",9.0 +"1984-08-10",6.3 +"1984-08-11",8.7 +"1984-08-12",8.5 +"1984-08-13",8.5 +"1984-08-14",8.0 +"1984-08-15",6.0 +"1984-08-16",8.0 +"1984-08-17",8.5 +"1984-08-18",7.7 +"1984-08-19",8.4 +"1984-08-20",9.0 +"1984-08-21",8.3 +"1984-08-22",6.8 +"1984-08-23",9.3 +"1984-08-24",6.7 +"1984-08-25",9.0 +"1984-08-26",7.3 +"1984-08-27",6.3 +"1984-08-28",7.9 +"1984-08-29",5.2 +"1984-08-30",9.0 +"1984-08-31",11.3 +"1984-09-01",9.2 +"1984-09-02",11.3 +"1984-09-03",7.0 +"1984-09-04",8.0 +"1984-09-05",4.6 +"1984-09-06",8.5 +"1984-09-07",9.5 +"1984-09-08",9.4 +"1984-09-09",10.5 +"1984-09-10",9.7 +"1984-09-11",4.9 +"1984-09-12",8.0 +"1984-09-13",5.8 +"1984-09-14",5.5 +"1984-09-15",10.9 +"1984-09-16",11.7 +"1984-09-17",9.2 +"1984-09-18",8.9 +"1984-09-19",11.3 +"1984-09-20",8.6 +"1984-09-21",6.2 +"1984-09-22",6.6 +"1984-09-23",9.1 +"1984-09-24",6.1 +"1984-09-25",7.5 +"1984-09-26",10.7 +"1984-09-27",6.3 +"1984-09-28",5.5 +"1984-09-29",6.7 +"1984-09-30",4.2 +"1984-10-01",11.3 +"1984-10-02",16.3 +"1984-10-03",10.5 +"1984-10-04",10.3 +"1984-10-05",7.9 +"1984-10-06",7.7 +"1984-10-07",16.0 +"1984-10-08",14.6 +"1984-10-09",12.5 +"1984-10-10",8.1 +"1984-10-11",12.2 +"1984-10-12",17.2 +"1984-10-13",9.4 +"1984-10-14",8.7 +"1984-10-15",5.9 +"1984-10-16",4.8 +"1984-10-17",7.4 +"1984-10-18",9.4 +"1984-10-19",9.7 +"1984-10-20",9.9 +"1984-10-21",6.5 +"1984-10-22",9.8 +"1984-10-23",18.2 +"1984-10-24",11.3 +"1984-10-25",9.1 +"1984-10-26",9.6 +"1984-10-27",13.5 +"1984-10-28",10.7 +"1984-10-29",10.0 +"1984-10-30",8.5 +"1984-10-31",12.6 +"1984-11-01",16.6 +"1984-11-02",11.6 +"1984-11-03",12.2 +"1984-11-04",11.2 +"1984-11-05",9.2 +"1984-11-06",9.9 +"1984-11-07",11.9 +"1984-11-08",15.6 +"1984-11-09",19.0 +"1984-11-10",12.8 +"1984-11-11",12.2 +"1984-11-12",12.0 +"1984-11-13",11.1 +"1984-11-14",11.8 +"1984-11-15",7.6 +"1984-11-16",13.0 +"1984-11-17",12.7 +"1984-11-18",16.0 +"1984-11-19",14.8 
+"1984-11-20",14.2 +"1984-11-21",10.0 +"1984-11-22",8.8 +"1984-11-23",11.6 +"1984-11-24",8.6 +"1984-11-25",14.6 +"1984-11-26",24.3 +"1984-11-27",11.6 +"1984-11-28",10.8 +"1984-11-29",12.0 +"1984-11-30",11.0 +"1984-12-01",12.6 +"1984-12-02",10.8 +"1984-12-03",9.1 +"1984-12-04",11.0 +"1984-12-05",13.0 +"1984-12-06",12.8 +"1984-12-07",9.9 +"1984-12-08",11.6 +"1984-12-09",10.5 +"1984-12-10",15.9 +"1984-12-11",12.2 +"1984-12-12",13.0 +"1984-12-13",12.5 +"1984-12-14",12.5 +"1984-12-15",11.4 +"1984-12-16",12.1 +"1984-12-17",16.8 +"1984-12-18",12.1 +"1984-12-19",11.3 +"1984-12-20",10.4 +"1984-12-21",14.2 +"1984-12-22",11.4 +"1984-12-23",13.7 +"1984-12-24",16.5 +"1984-12-25",12.8 +"1984-12-26",12.2 +"1984-12-27",12.0 +"1984-12-28",12.6 +"1984-12-29",16.0 +"1984-12-30",16.4 +"1985-01-01",13.3 +"1985-01-02",15.2 +"1985-01-03",13.1 +"1985-01-04",12.7 +"1985-01-05",14.6 +"1985-01-06",11.0 +"1985-01-07",13.2 +"1985-01-08",12.2 +"1985-01-09",14.4 +"1985-01-10",13.7 +"1985-01-11",14.5 +"1985-01-12",14.1 +"1985-01-13",14.4 +"1985-01-14",19.7 +"1985-01-15",16.5 +"1985-01-16",15.9 +"1985-01-17",11.8 +"1985-01-18",12.0 +"1985-01-19",11.4 +"1985-01-20",14.4 +"1985-01-21",12.4 +"1985-01-22",15.1 +"1985-01-23",15.6 +"1985-01-24",15.2 +"1985-01-25",12.8 +"1985-01-26",13.3 +"1985-01-27",17.5 +"1985-01-28",15.4 +"1985-01-29",13.5 +"1985-01-30",16.7 +"1985-01-31",15.2 +"1985-02-01",14.9 +"1985-02-02",10.2 +"1985-02-03",13.6 +"1985-02-04",19.0 +"1985-02-05",15.7 +"1985-02-06",18.0 +"1985-02-07",14.8 +"1985-02-08",13.9 +"1985-02-09",13.0 +"1985-02-10",15.3 +"1985-02-11",14.3 +"1985-02-12",15.6 +"1985-02-13",16.0 +"1985-02-14",14.9 +"1985-02-15",11.1 +"1985-02-16",14.8 +"1985-02-17",13.0 +"1985-02-18",12.2 +"1985-02-19",10.9 +"1985-02-20",14.6 +"1985-02-21",16.6 +"1985-02-22",18.1 +"1985-02-23",13.4 +"1985-02-24",10.3 +"1985-02-25",13.6 +"1985-02-26",13.8 +"1985-02-27",10.3 +"1985-02-28",11.0 +"1985-03-01",14.3 +"1985-03-02",15.5 +"1985-03-03",14.7 +"1985-03-04",12.7 +"1985-03-05",10.7 +"1985-03-06",12.6 +"1985-03-07",9.8 +"1985-03-08",13.2 +"1985-03-09",15.2 +"1985-03-10",16.6 +"1985-03-11",21.0 +"1985-03-12",22.4 +"1985-03-13",17.0 +"1985-03-14",21.7 +"1985-03-15",21.4 +"1985-03-16",18.6 +"1985-03-17",16.2 +"1985-03-18",16.8 +"1985-03-19",17.0 +"1985-03-20",18.4 +"1985-03-21",17.2 +"1985-03-22",18.4 +"1985-03-23",18.8 +"1985-03-24",16.5 +"1985-03-25",13.3 +"1985-03-26",12.2 +"1985-03-27",11.3 +"1985-03-28",13.8 +"1985-03-29",16.6 +"1985-03-30",14.0 +"1985-03-31",14.3 +"1985-04-01",16.4 +"1985-04-02",11.9 +"1985-04-03",15.7 +"1985-04-04",17.6 +"1985-04-05",17.5 +"1985-04-06",15.9 +"1985-04-07",16.2 +"1985-04-08",16.0 +"1985-04-09",15.9 +"1985-04-10",16.2 +"1985-04-11",16.2 +"1985-04-12",19.5 +"1985-04-13",18.2 +"1985-04-14",21.8 +"1985-04-15",15.1 +"1985-04-16",11.0 +"1985-04-17",8.1 +"1985-04-18",9.5 +"1985-04-19",9.3 +"1985-04-20",10.6 +"1985-04-21",6.3 +"1985-04-22",8.6 +"1985-04-23",6.8 +"1985-04-24",8.7 +"1985-04-25",8.4 +"1985-04-26",9.3 +"1985-04-27",10.0 +"1985-04-28",10.5 +"1985-04-29",12.0 +"1985-04-30",10.1 +"1985-05-01",9.4 +"1985-05-02",10.1 +"1985-05-03",8.0 +"1985-05-04",10.6 +"1985-05-05",13.6 +"1985-05-06",15.4 +"1985-05-07",9.0 +"1985-05-08",10.4 +"1985-05-09",11.0 +"1985-05-10",12.1 +"1985-05-11",13.4 +"1985-05-12",11.3 +"1985-05-13",6.7 +"1985-05-14",9.8 +"1985-05-15",10.8 +"1985-05-16",7.8 +"1985-05-17",4.5 +"1985-05-18",7.6 +"1985-05-19",6.9 +"1985-05-20",7.5 +"1985-05-21",8.5 +"1985-05-22",5.5 +"1985-05-23",9.5 +"1985-05-24",7.3 +"1985-05-25",5.4 +"1985-05-26",5.5 +"1985-05-27",8.1 
+"1985-05-28",11.2 +"1985-05-29",13.4 +"1985-05-30",11.6 +"1985-05-31",10.1 +"1985-06-01",4.3 +"1985-06-02",5.5 +"1985-06-03",4.4 +"1985-06-04",5.9 +"1985-06-05",5.7 +"1985-06-06",8.2 +"1985-06-07",8.2 +"1985-06-08",4.2 +"1985-06-09",6.5 +"1985-06-10",10.0 +"1985-06-11",8.8 +"1985-06-12",6.6 +"1985-06-13",7.8 +"1985-06-14",10.1 +"1985-06-15",7.1 +"1985-06-16",7.7 +"1985-06-17",8.5 +"1985-06-18",7.3 +"1985-06-19",6.9 +"1985-06-20",8.4 +"1985-06-21",7.1 +"1985-06-22",6.3 +"1985-06-23",0.6 +"1985-06-24",1.6 +"1985-06-25",7.0 +"1985-06-26",8.3 +"1985-06-27",8.0 +"1985-06-28",10.2 +"1985-06-29",10.6 +"1985-06-30",10.4 +"1985-07-01",11.6 +"1985-07-02",11.0 +"1985-07-03",10.7 +"1985-07-04",7.3 +"1985-07-05",4.2 +"1985-07-06",4.7 +"1985-07-07",5.6 +"1985-07-08",7.7 +"1985-07-09",7.5 +"1985-07-10",4.9 +"1985-07-11",5.9 +"1985-07-12",7.8 +"1985-07-13",5.8 +"1985-07-14",7.0 +"1985-07-15",8.4 +"1985-07-16",6.2 +"1985-07-17",7.5 +"1985-07-18",4.8 +"1985-07-19",3.3 +"1985-07-20",3.2 +"1985-07-21",7.0 +"1985-07-22",8.4 +"1985-07-23",0.3 +"1985-07-24",0.3 +"1985-07-25",2.1 +"1985-07-26",8.5 +"1985-07-27",1.4 +"1985-07-28",4.1 +"1985-07-29",10.3 +"1985-07-30",6.6 +"1985-07-31",6.1 +"1985-08-01",7.0 +"1985-08-02",5.1 +"1985-08-03",6.3 +"1985-08-04",6.9 +"1985-08-05",11.4 +"1985-08-06",10.4 +"1985-08-07",10.3 +"1985-08-08",9.2 +"1985-08-09",7.2 +"1985-08-10",7.5 +"1985-08-11",4.0 +"1985-08-12",5.6 +"1985-08-13",6.7 +"1985-08-14",8.4 +"1985-08-15",11.0 +"1985-08-16",8.4 +"1985-08-17",8.8 +"1985-08-18",8.6 +"1985-08-19",8.3 +"1985-08-20",4.0 +"1985-08-21",3.6 +"1985-08-22",5.7 +"1985-08-23",10.6 +"1985-08-24",6.9 +"1985-08-25",10.0 +"1985-08-26",9.8 +"1985-08-27",7.2 +"1985-08-28",10.5 +"1985-08-29",3.6 +"1985-08-30",5.3 +"1985-08-31",8.4 +"1985-09-01",10.3 +"1985-09-02",7.9 +"1985-09-03",8.5 +"1985-09-04",7.9 +"1985-09-05",8.0 +"1985-09-06",9.8 +"1985-09-07",6.7 +"1985-09-08",4.8 +"1985-09-09",9.9 +"1985-09-10",12.8 +"1985-09-11",10.9 +"1985-09-12",11.7 +"1985-09-13",11.7 +"1985-09-14",11.0 +"1985-09-15",8.2 +"1985-09-16",7.5 +"1985-09-17",5.4 +"1985-09-18",7.2 +"1985-09-19",9.7 +"1985-09-20",8.4 +"1985-09-21",9.0 +"1985-09-22",8.7 +"1985-09-23",6.6 +"1985-09-24",11.6 +"1985-09-25",13.1 +"1985-09-26",6.7 +"1985-09-27",6.5 +"1985-09-28",7.7 +"1985-09-29",8.7 +"1985-09-30",7.2 +"1985-10-01",10.5 +"1985-10-02",8.6 +"1985-10-03",7.2 +"1985-10-04",11.4 +"1985-10-05",16.2 +"1985-10-06",6.1 +"1985-10-07",9.6 +"1985-10-08",11.1 +"1985-10-09",13.6 +"1985-10-10",10.7 +"1985-10-11",14.7 +"1985-10-12",11.6 +"1985-10-13",7.3 +"1985-10-14",8.0 +"1985-10-15",9.6 +"1985-10-16",16.0 +"1985-10-17",15.1 +"1985-10-18",12.8 +"1985-10-19",6.2 +"1985-10-20",7.1 +"1985-10-21",8.4 +"1985-10-22",10.0 +"1985-10-23",12.7 +"1985-10-24",10.0 +"1985-10-25",10.2 +"1985-10-26",6.5 +"1985-10-27",9.2 +"1985-10-28",11.9 +"1985-10-29",14.7 +"1985-10-30",11.4 +"1985-10-31",6.8 +"1985-11-01",7.4 +"1985-11-02",11.2 +"1985-11-03",9.2 +"1985-11-04",12.6 +"1985-11-05",16.0 +"1985-11-06",17.1 +"1985-11-07",15.3 +"1985-11-08",13.3 +"1985-11-09",15.4 +"1985-11-10",13.2 +"1985-11-11",14.4 +"1985-11-12",14.0 +"1985-11-13",15.5 +"1985-11-14",21.0 +"1985-11-15",10.0 +"1985-11-16",9.6 +"1985-11-17",12.0 +"1985-11-18",12.2 +"1985-11-19",11.3 +"1985-11-20",13.2 +"1985-11-21",10.5 +"1985-11-22",10.1 +"1985-11-23",8.8 +"1985-11-24",13.7 +"1985-11-25",16.2 +"1985-11-26",16.0 +"1985-11-27",14.0 +"1985-11-28",13.7 +"1985-11-29",12.5 +"1985-11-30",12.8 +"1985-12-01",12.3 +"1985-12-02",15.2 +"1985-12-03",15.0 +"1985-12-04",16.4 +"1985-12-05",16.1 +"1985-12-06",14.6 
+"1985-12-07",18.2 +"1985-12-08",16.4 +"1985-12-09",16.6 +"1985-12-10",14.7 +"1985-12-11",15.8 +"1985-12-12",14.1 +"1985-12-13",13.5 +"1985-12-14",13.6 +"1985-12-15",13.7 +"1985-12-16",13.6 +"1985-12-17",12.1 +"1985-12-18",12.7 +"1985-12-19",13.3 +"1985-12-20",14.2 +"1985-12-21",15.0 +"1985-12-22",13.7 +"1985-12-23",12.0 +"1985-12-24",13.1 +"1985-12-25",13.2 +"1985-12-26",13.3 +"1985-12-27",11.5 +"1985-12-28",10.8 +"1985-12-29",12.0 +"1985-12-30",16.3 +"1985-12-31",14.4 +"1986-01-01",12.9 +"1986-01-02",13.8 +"1986-01-03",10.6 +"1986-01-04",12.6 +"1986-01-05",13.7 +"1986-01-06",12.6 +"1986-01-07",13.1 +"1986-01-08",15.4 +"1986-01-09",11.9 +"1986-01-10",13.8 +"1986-01-11",14.4 +"1986-01-12",15.2 +"1986-01-13",12.5 +"1986-01-14",12.2 +"1986-01-15",16.1 +"1986-01-16",14.6 +"1986-01-17",11.6 +"1986-01-18",13.1 +"1986-01-19",12.8 +"1986-01-20",15.2 +"1986-01-21",13.8 +"1986-01-22",15.0 +"1986-01-23",13.5 +"1986-01-24",11.8 +"1986-01-25",15.3 +"1986-01-26",13.5 +"1986-01-27",15.3 +"1986-01-28",13.8 +"1986-01-29",15.8 +"1986-01-30",17.4 +"1986-01-31",15.3 +"1986-02-01",14.6 +"1986-02-02",14.8 +"1986-02-03",10.7 +"1986-02-04",11.6 +"1986-02-05",13.6 +"1986-02-06",14.4 +"1986-02-07",11.8 +"1986-02-08",15.8 +"1986-02-09",16.0 +"1986-02-10",11.8 +"1986-02-11",14.5 +"1986-02-12",10.7 +"1986-02-13",14.2 +"1986-02-14",19.5 +"1986-02-15",21.4 +"1986-02-16",17.9 +"1986-02-17",17.4 +"1986-02-18",12.7 +"1986-02-19",13.8 +"1986-02-20",14.0 +"1986-02-21",15.0 +"1986-02-22",14.5 +"1986-02-23",13.1 +"1986-02-24",11.4 +"1986-02-25",12.5 +"1986-02-26",12.0 +"1986-02-27",13.4 +"1986-02-28",14.4 +"1986-03-01",17.7 +"1986-03-02",13.9 +"1986-03-03",13.3 +"1986-03-04",14.6 +"1986-03-05",16.4 +"1986-03-06",16.8 +"1986-03-07",20.0 +"1986-03-08",12.5 +"1986-03-09",12.7 +"1986-03-10",11.7 +"1986-03-11",12.7 +"1986-03-12",8.6 +"1986-03-13",11.9 +"1986-03-14",16.0 +"1986-03-15",15.2 +"1986-03-16",13.4 +"1986-03-17",11.6 +"1986-03-18",11.1 +"1986-03-19",15.6 +"1986-03-20",17.0 +"1986-03-21",18.5 +"1986-03-22",17.4 +"1986-03-23",16.5 +"1986-03-24",16.2 +"1986-03-25",16.1 +"1986-03-26",13.2 +"1986-03-27",18.0 +"1986-03-28",12.8 +"1986-03-29",11.7 +"1986-03-30",16.7 +"1986-03-31",15.6 +"1986-04-01",10.2 +"1986-04-02",10.3 +"1986-04-03",15.0 +"1986-04-04",18.0 +"1986-04-05",13.8 +"1986-04-06",10.5 +"1986-04-07",11.8 +"1986-04-08",7.2 +"1986-04-09",11.6 +"1986-04-10",7.4 +"1986-04-11",14.2 +"1986-04-12",12.2 +"1986-04-13",9.0 +"1986-04-14",12.3 +"1986-04-15",19.7 +"1986-04-16",12.8 +"1986-04-17",12.4 +"1986-04-18",12.0 +"1986-04-19",12.0 +"1986-04-20",11.1 +"1986-04-21",12.7 +"1986-04-22",14.2 +"1986-04-23",11.6 +"1986-04-24",12.0 +"1986-04-25",11.5 +"1986-04-26",8.3 +"1986-04-27",10.5 +"1986-04-28",9.0 +"1986-04-29",6.9 +"1986-04-30",9.4 +"1986-05-01",11.1 +"1986-05-02",9.1 +"1986-05-03",7.7 +"1986-05-04",10.0 +"1986-05-05",10.4 +"1986-05-06",8.0 +"1986-05-07",9.8 +"1986-05-08",12.4 +"1986-05-09",12.9 +"1986-05-10",12.3 +"1986-05-11",6.9 +"1986-05-12",10.5 +"1986-05-13",11.0 +"1986-05-14",9.7 +"1986-05-15",11.1 +"1986-05-16",11.5 +"1986-05-17",13.4 +"1986-05-18",10.9 +"1986-05-19",12.0 +"1986-05-20",12.1 +"1986-05-21",10.4 +"1986-05-22",10.0 +"1986-05-23",9.6 +"1986-05-24",11.3 +"1986-05-25",8.5 +"1986-05-26",6.3 +"1986-05-27",8.2 +"1986-05-28",10.7 +"1986-05-29",10.3 +"1986-05-30",9.5 +"1986-05-31",10.9 +"1986-06-01",10.9 +"1986-06-02",4.3 +"1986-06-03",5.2 +"1986-06-04",11.0 +"1986-06-05",11.6 +"1986-06-06",10.6 +"1986-06-07",9.4 +"1986-06-08",10.0 +"1986-06-09",9.6 +"1986-06-10",9.5 +"1986-06-11",9.7 +"1986-06-12",9.6 
+"1986-06-13",7.0 +"1986-06-14",7.0 +"1986-06-15",6.8 +"1986-06-16",6.9 +"1986-06-17",8.0 +"1986-06-18",7.6 +"1986-06-19",8.6 +"1986-06-20",5.7 +"1986-06-21",5.5 +"1986-06-22",5.7 +"1986-06-23",5.7 +"1986-06-24",6.6 +"1986-06-25",6.0 +"1986-06-26",6.9 +"1986-06-27",7.7 +"1986-06-28",8.0 +"1986-06-29",3.9 +"1986-06-30",0.8 +"1986-07-01",2.8 +"1986-07-02",8.0 +"1986-07-03",9.8 +"1986-07-04",11.4 +"1986-07-05",8.6 +"1986-07-06",5.2 +"1986-07-07",6.6 +"1986-07-08",5.7 +"1986-07-09",4.6 +"1986-07-10",5.8 +"1986-07-11",7.0 +"1986-07-12",4.8 +"1986-07-13",4.4 +"1986-07-14",4.4 +"1986-07-15",7.9 +"1986-07-16",10.6 +"1986-07-17",5.0 +"1986-07-18",7.6 +"1986-07-19",9.2 +"1986-07-20",9.7 +"1986-07-21",8.8 +"1986-07-22",6.8 +"1986-07-23",9.4 +"1986-07-24",11.0 +"1986-07-25",2.5 +"1986-07-26",2.1 +"1986-07-27",5.4 +"1986-07-28",6.2 +"1986-07-29",7.8 +"1986-07-30",7.4 +"1986-07-31",9.3 +"1986-08-01",9.3 +"1986-08-02",9.5 +"1986-08-03",8.5 +"1986-08-04",10.0 +"1986-08-05",7.7 +"1986-08-06",9.3 +"1986-08-07",9.1 +"1986-08-08",3.5 +"1986-08-09",3.6 +"1986-08-10",2.5 +"1986-08-11",1.7 +"1986-08-12",2.7 +"1986-08-13",2.9 +"1986-08-14",5.3 +"1986-08-15",7.7 +"1986-08-16",9.1 +"1986-08-17",9.4 +"1986-08-18",7.3 +"1986-08-19",8.4 +"1986-08-20",9.2 +"1986-08-21",6.6 +"1986-08-22",9.7 +"1986-08-23",12.4 +"1986-08-24",10.2 +"1986-08-25",5.9 +"1986-08-26",7.1 +"1986-08-27",7.5 +"1986-08-28",9.7 +"1986-08-29",12.2 +"1986-08-30",5.6 +"1986-08-31",5.4 +"1986-09-01",8.3 +"1986-09-02",10.6 +"1986-09-03",9.1 +"1986-09-04",11.3 +"1986-09-05",10.9 +"1986-09-06",8.9 +"1986-09-07",6.3 +"1986-09-08",9.0 +"1986-09-09",6.1 +"1986-09-10",9.1 +"1986-09-11",9.6 +"1986-09-12",6.0 +"1986-09-13",10.0 +"1986-09-14",11.0 +"1986-09-15",6.2 +"1986-09-16",8.3 +"1986-09-17",11.3 +"1986-09-18",11.3 +"1986-09-19",6.7 +"1986-09-20",6.6 +"1986-09-21",11.4 +"1986-09-22",6.9 +"1986-09-23",10.6 +"1986-09-24",8.6 +"1986-09-25",11.3 +"1986-09-26",12.5 +"1986-09-27",9.9 +"1986-09-28",6.9 +"1986-09-29",5.5 +"1986-09-30",7.8 +"1986-10-01",11.0 +"1986-10-02",16.2 +"1986-10-03",9.9 +"1986-10-04",8.7 +"1986-10-05",10.5 +"1986-10-06",12.2 +"1986-10-07",10.6 +"1986-10-08",8.3 +"1986-10-09",5.5 +"1986-10-10",9.0 +"1986-10-11",6.4 +"1986-10-12",7.2 +"1986-10-13",12.9 +"1986-10-14",12.0 +"1986-10-15",7.3 +"1986-10-16",9.7 +"1986-10-17",8.4 +"1986-10-18",14.7 +"1986-10-19",9.5 +"1986-10-20",7.9 +"1986-10-21",6.8 +"1986-10-22",12.6 +"1986-10-23",5.2 +"1986-10-24",7.5 +"1986-10-25",8.7 +"1986-10-26",7.6 +"1986-10-27",9.0 +"1986-10-28",7.2 +"1986-10-29",10.7 +"1986-10-30",13.1 +"1986-10-31",13.9 +"1986-11-01",10.8 +"1986-11-02",10.4 +"1986-11-03",9.1 +"1986-11-04",16.0 +"1986-11-05",21.0 +"1986-11-06",16.2 +"1986-11-07",8.6 +"1986-11-08",9.2 +"1986-11-09",12.5 +"1986-11-10",9.7 +"1986-11-11",12.5 +"1986-11-12",10.3 +"1986-11-13",12.0 +"1986-11-14",11.0 +"1986-11-15",14.8 +"1986-11-16",15.0 +"1986-11-17",15.3 +"1986-11-18",10.3 +"1986-11-19",10.7 +"1986-11-20",10.5 +"1986-11-21",8.9 +"1986-11-22",8.1 +"1986-11-23",11.5 +"1986-11-24",12.8 +"1986-11-25",9.1 +"1986-11-26",14.6 +"1986-11-27",11.6 +"1986-11-28",11.2 +"1986-11-29",12.6 +"1986-11-30",7.5 +"1986-12-01",11.0 +"1986-12-02",14.5 +"1986-12-03",18.5 +"1986-12-04",15.4 +"1986-12-05",13.1 +"1986-12-06",16.3 +"1986-12-07",20.2 +"1986-12-08",11.5 +"1986-12-09",12.4 +"1986-12-10",10.9 +"1986-12-11",12.7 +"1986-12-12",12.2 +"1986-12-13",12.4 +"1986-12-14",9.8 +"1986-12-15",8.5 +"1986-12-16",14.7 +"1986-12-17",12.0 +"1986-12-18",10.3 +"1986-12-19",11.0 +"1986-12-20",10.2 +"1986-12-21",12.6 +"1986-12-22",11.6 
+"1986-12-23",9.7 +"1986-12-24",13.4 +"1986-12-25",10.5 +"1986-12-26",14.7 +"1986-12-27",14.6 +"1986-12-28",14.2 +"1986-12-29",13.2 +"1986-12-30",11.7 +"1986-12-31",17.2 +"1987-01-01",12.3 +"1987-01-02",13.8 +"1987-01-03",15.3 +"1987-01-04",15.6 +"1987-01-05",16.2 +"1987-01-06",16.3 +"1987-01-07",16.8 +"1987-01-08",11.0 +"1987-01-09",8.5 +"1987-01-10",13.2 +"1987-01-11",13.0 +"1987-01-12",12.4 +"1987-01-13",13.0 +"1987-01-14",16.6 +"1987-01-15",12.0 +"1987-01-16",12.4 +"1987-01-17",15.0 +"1987-01-18",11.8 +"1987-01-19",11.6 +"1987-01-20",12.2 +"1987-01-21",13.7 +"1987-01-22",11.2 +"1987-01-23",12.4 +"1987-01-24",11.5 +"1987-01-25",13.8 +"1987-01-26",15.7 +"1987-01-27",12.9 +"1987-01-28",11.5 +"1987-01-29",11.0 +"1987-01-30",12.7 +"1987-01-31",14.9 +"1987-02-01",16.5 +"1987-02-02",12.8 +"1987-02-03",12.7 +"1987-02-04",12.7 +"1987-02-05",11.6 +"1987-02-06",13.3 +"1987-02-07",15.2 +"1987-02-08",16.4 +"1987-02-09",11.9 +"1987-02-10",15.1 +"1987-02-11",10.6 +"1987-02-12",13.6 +"1987-02-13",12.1 +"1987-02-14",16.0 +"1987-02-15",16.8 +"1987-02-16",16.6 +"1987-02-17",15.6 +"1987-02-18",15.2 +"1987-02-19",17.7 +"1987-02-20",21.0 +"1987-02-21",13.4 +"1987-02-22",10.5 +"1987-02-23",9.5 +"1987-02-24",12.0 +"1987-02-25",10.4 +"1987-02-26",11.5 +"1987-02-27",13.2 +"1987-02-28",15.0 +"1987-03-01",14.1 +"1987-03-02",12.4 +"1987-03-03",13.4 +"1987-03-04",12.5 +"1987-03-05",14.3 +"1987-03-06",17.6 +"1987-03-07",10.4 +"1987-03-08",9.9 +"1987-03-09",10.2 +"1987-03-10",11.3 +"1987-03-11",9.5 +"1987-03-12",11.8 +"1987-03-13",11.5 +"1987-03-14",10.5 +"1987-03-15",10.8 +"1987-03-16",13.0 +"1987-03-17",18.5 +"1987-03-18",18.7 +"1987-03-19",15.0 +"1987-03-20",13.0 +"1987-03-21",11.3 +"1987-03-22",13.0 +"1987-03-23",13.3 +"1987-03-24",11.0 +"1987-03-25",10.3 +"1987-03-26",13.0 +"1987-03-27",12.3 +"1987-03-28",15.6 +"1987-03-29",10.2 +"1987-03-30",10.8 +"1987-03-31",12.0 +"1987-04-01",13.3 +"1987-04-02",11.7 +"1987-04-03",12.5 +"1987-04-04",13.7 +"1987-04-05",14.9 +"1987-04-06",20.2 +"1987-04-07",16.3 +"1987-04-08",13.9 +"1987-04-09",10.1 +"1987-04-10",7.3 +"1987-04-11",14.0 +"1987-04-12",17.7 +"1987-04-13",16.3 +"1987-04-14",10.6 +"1987-04-15",9.7 +"1987-04-16",7.8 +"1987-04-17",10.4 +"1987-04-18",10.4 +"1987-04-19",14.1 +"1987-04-20",7.1 +"1987-04-21",8.1 +"1987-04-22",7.8 +"1987-04-23",10.6 +"1987-04-24",9.1 +"1987-04-25",9.0 +"1987-04-26",11.9 +"1987-04-27",17.1 +"1987-04-28",16.8 +"1987-04-29",13.5 +"1987-04-30",11.6 +"1987-05-01",7.0 +"1987-05-02",9.7 +"1987-05-03",9.9 +"1987-05-04",11.2 +"1987-05-05",11.3 +"1987-05-06",11.8 +"1987-05-07",9.9 +"1987-05-08",7.1 +"1987-05-09",9.6 +"1987-05-10",9.8 +"1987-05-11",10.6 +"1987-05-12",12.8 +"1987-05-13",16.5 +"1987-05-14",11.7 +"1987-05-15",12.3 +"1987-05-16",12.2 +"1987-05-17",11.8 +"1987-05-18",10.7 +"1987-05-19",10.2 +"1987-05-20",10.0 +"1987-05-21",8.3 +"1987-05-22",6.6 +"1987-05-23",9.5 +"1987-05-24",12.3 +"1987-05-25",7.6 +"1987-05-26",9.3 +"1987-05-27",5.0 +"1987-05-28",4.3 +"1987-05-29",6.4 +"1987-05-30",10.8 +"1987-05-31",7.8 +"1987-06-01",8.5 +"1987-06-02",9.7 +"1987-06-03",10.0 +"1987-06-04",11.0 +"1987-06-05",10.2 +"1987-06-06",6.6 +"1987-06-07",6.1 +"1987-06-08",5.9 +"1987-06-09",8.9 +"1987-06-10",13.0 +"1987-06-11",12.6 +"1987-06-12",5.4 +"1987-06-13",6.0 +"1987-06-14",7.8 +"1987-06-15",9.0 +"1987-06-16",4.2 +"1987-06-17",3.0 +"1987-06-18",4.5 +"1987-06-19",6.2 +"1987-06-20",11.9 +"1987-06-21",11.8 +"1987-06-22",9.4 +"1987-06-23",9.6 +"1987-06-24",9.4 +"1987-06-25",7.0 +"1987-06-26",8.9 +"1987-06-27",9.3 +"1987-06-28",6.8 +"1987-06-29",7.5 
+"1987-06-30",8.0 +"1987-07-01",8.3 +"1987-07-02",2.7 +"1987-07-03",3.9 +"1987-07-04",4.1 +"1987-07-05",5.0 +"1987-07-06",5.8 +"1987-07-07",4.4 +"1987-07-08",4.1 +"1987-07-09",5.8 +"1987-07-10",9.1 +"1987-07-11",7.9 +"1987-07-12",5.0 +"1987-07-13",2.8 +"1987-07-14",4.7 +"1987-07-15",8.9 +"1987-07-16",5.4 +"1987-07-17",7.1 +"1987-07-18",9.0 +"1987-07-19",9.4 +"1987-07-20",6.3 +"1987-07-21",7.0 +"1987-07-22",6.4 +"1987-07-23",6.7 +"1987-07-24",1.5 +"1987-07-25",2.9 +"1987-07-26",4.8 +"1987-07-27",6.3 +"1987-07-28",5.7 +"1987-07-29",7.0 +"1987-07-30",8.8 +"1987-07-31",8.7 +"1987-08-01",9.0 +"1987-08-02",9.6 +"1987-08-03",8.0 +"1987-08-04",8.4 +"1987-08-05",8.1 +"1987-08-06",9.0 +"1987-08-07",5.3 +"1987-08-08",8.9 +"1987-08-09",8.7 +"1987-08-10",4.9 +"1987-08-11",7.0 +"1987-08-12",7.5 +"1987-08-13",7.0 +"1987-08-14",9.1 +"1987-08-15",11.8 +"1987-08-16",9.9 +"1987-08-17",5.6 +"1987-08-18",4.2 +"1987-08-19",4.3 +"1987-08-20",8.0 +"1987-08-21",5.1 +"1987-08-22",9.4 +"1987-08-23",9.1 +"1987-08-24",9.7 +"1987-08-25",10.6 +"1987-08-26",8.6 +"1987-08-27",10.1 +"1987-08-28",11.0 +"1987-08-29",9.7 +"1987-08-30",5.0 +"1987-08-31",6.1 +"1987-09-01",5.4 +"1987-09-02",5.8 +"1987-09-03",7.3 +"1987-09-04",6.3 +"1987-09-05",4.8 +"1987-09-06",7.6 +"1987-09-07",8.1 +"1987-09-08",9.5 +"1987-09-09",10.3 +"1987-09-10",7.0 +"1987-09-11",9.0 +"1987-09-12",10.2 +"1987-09-13",6.8 +"1987-09-14",9.3 +"1987-09-15",9.8 +"1987-09-16",10.7 +"1987-09-17",7.8 +"1987-09-18",9.2 +"1987-09-19",15.0 +"1987-09-20",7.8 +"1987-09-21",5.3 +"1987-09-22",9.5 +"1987-09-23",7.6 +"1987-09-24",14.0 +"1987-09-25",14.9 +"1987-09-26",14.9 +"1987-09-27",19.2 +"1987-09-28",17.0 +"1987-09-29",13.0 +"1987-09-30",11.2 +"1987-10-01",9.5 +"1987-10-02",10.3 +"1987-10-03",9.3 +"1987-10-04",11.3 +"1987-10-05",6.5 +"1987-10-06",12.0 +"1987-10-07",8.3 +"1987-10-08",8.7 +"1987-10-09",8.7 +"1987-10-10",10.2 +"1987-10-11",6.9 +"1987-10-12",4.9 +"1987-10-13",10.0 +"1987-10-14",7.6 +"1987-10-15",14.5 +"1987-10-16",13.2 +"1987-10-17",9.9 +"1987-10-18",10.1 +"1987-10-19",11.3 +"1987-10-20",10.4 +"1987-10-21",10.9 +"1987-10-22",9.2 +"1987-10-23",10.5 +"1987-10-24",11.4 +"1987-10-25",13.5 +"1987-10-26",9.8 +"1987-10-27",13.1 +"1987-10-28",9.7 +"1987-10-29",11.4 +"1987-10-30",9.9 +"1987-10-31",14.4 +"1987-11-01",19.0 +"1987-11-02",23.0 +"1987-11-03",15.4 +"1987-11-04",9.6 +"1987-11-05",10.8 +"1987-11-06",12.1 +"1987-11-07",11.0 +"1987-11-08",12.6 +"1987-11-09",14.7 +"1987-11-10",11.1 +"1987-11-11",10.1 +"1987-11-12",11.4 +"1987-11-13",13.0 +"1987-11-14",11.9 +"1987-11-15",9.5 +"1987-11-16",13.5 +"1987-11-17",15.2 +"1987-11-18",18.4 +"1987-11-19",24.1 +"1987-11-20",14.1 +"1987-11-21",10.7 +"1987-11-22",8.7 +"1987-11-23",13.3 +"1987-11-24",11.6 +"1987-11-25",9.9 +"1987-11-26",10.8 +"1987-11-27",11.5 +"1987-11-28",10.0 +"1987-11-29",13.9 +"1987-11-30",13.6 +"1987-12-01",11.9 +"1987-12-02",11.1 +"1987-12-03",8.2 +"1987-12-04",9.4 +"1987-12-05",12.7 +"1987-12-06",11.6 +"1987-12-07",11.0 +"1987-12-08",11.3 +"1987-12-09",13.4 +"1987-12-10",14.9 +"1987-12-11",15.2 +"1987-12-12",13.9 +"1987-12-13",15.0 +"1987-12-14",16.2 +"1987-12-15",17.7 +"1987-12-16",20.5 +"1987-12-17",14.7 +"1987-12-18",12.5 +"1987-12-19",10.9 +"1987-12-20",12.8 +"1987-12-21",12.7 +"1987-12-22",11.2 +"1987-12-23",11.4 +"1987-12-24",11.2 +"1987-12-25",12.1 +"1987-12-26",12.7 +"1987-12-27",16.2 +"1987-12-28",14.2 +"1987-12-29",14.3 +"1987-12-30",13.3 +"1987-12-31",16.7 +"1988-01-01",15.3 +"1988-01-02",14.3 +"1988-01-03",13.5 +"1988-01-04",15.0 +"1988-01-05",13.6 +"1988-01-06",15.2 +"1988-01-07",17.0 
+"1988-01-08",18.7 +"1988-01-09",16.5 +"1988-01-10",17.4 +"1988-01-11",18.3 +"1988-01-12",18.3 +"1988-01-13",22.4 +"1988-01-14",21.4 +"1988-01-15",20.9 +"1988-01-16",17.6 +"1988-01-17",15.5 +"1988-01-18",16.6 +"1988-01-19",16.2 +"1988-01-20",15.6 +"1988-01-21",14.5 +"1988-01-22",14.0 +"1988-01-23",15.6 +"1988-01-24",12.3 +"1988-01-25",11.6 +"1988-01-26",12.6 +"1988-01-27",14.9 +"1988-01-28",17.3 +"1988-01-29",21.4 +"1988-01-30",23.4 +"1988-01-31",14.4 +"1988-02-01",14.1 +"1988-02-02",15.0 +"1988-02-03",14.5 +"1988-02-04",15.1 +"1988-02-05",13.9 +"1988-02-06",13.4 +"1988-02-07",9.2 +"1988-02-08",12.5 +"1988-02-09",15.1 +"1988-02-10",12.1 +"1988-02-11",14.5 +"1988-02-12",16.3 +"1988-02-13",16.5 +"1988-02-14",14.9 +"1988-02-15",13.2 +"1988-02-16",11.8 +"1988-02-17",13.6 +"1988-02-18",16.2 +"1988-02-19",14.1 +"1988-02-20",13.5 +"1988-02-21",15.0 +"1988-02-22",14.8 +"1988-02-23",16.2 +"1988-02-24",16.2 +"1988-02-25",13.3 +"1988-02-26",15.3 +"1988-02-27",18.4 +"1988-02-28",16.2 +"1988-02-29",16.3 +"1988-03-01",12.4 +"1988-03-02",15.6 +"1988-03-03",14.9 +"1988-03-04",14.8 +"1988-03-05",12.7 +"1988-03-06",14.2 +"1988-03-07",16.8 +"1988-03-08",16.7 +"1988-03-09",16.2 +"1988-03-10",14.5 +"1988-03-11",10.0 +"1988-03-12",12.6 +"1988-03-13",11.9 +"1988-03-14",11.8 +"1988-03-15",13.4 +"1988-03-16",14.5 +"1988-03-17",15.7 +"1988-03-18",15.3 +"1988-03-19",13.9 +"1988-03-20",13.7 +"1988-03-21",15.1 +"1988-03-22",15.6 +"1988-03-23",14.4 +"1988-03-24",13.9 +"1988-03-25",16.2 +"1988-03-26",16.7 +"1988-03-27",15.5 +"1988-03-28",16.4 +"1988-03-29",17.5 +"1988-03-30",18.2 +"1988-03-31",16.1 +"1988-04-01",16.5 +"1988-04-02",14.6 +"1988-04-03",16.4 +"1988-04-04",13.6 +"1988-04-05",15.9 +"1988-04-06",11.9 +"1988-04-07",14.7 +"1988-04-08",9.4 +"1988-04-09",6.6 +"1988-04-10",7.9 +"1988-04-11",11.0 +"1988-04-12",15.7 +"1988-04-13",15.2 +"1988-04-14",15.9 +"1988-04-15",10.6 +"1988-04-16",8.3 +"1988-04-17",8.6 +"1988-04-18",12.7 +"1988-04-19",10.5 +"1988-04-20",12.0 +"1988-04-21",11.1 +"1988-04-22",13.0 +"1988-04-23",12.4 +"1988-04-24",13.3 +"1988-04-25",15.9 +"1988-04-26",12.0 +"1988-04-27",13.7 +"1988-04-28",17.6 +"1988-04-29",14.3 +"1988-04-30",13.7 +"1988-05-01",15.2 +"1988-05-02",14.5 +"1988-05-03",14.9 +"1988-05-04",15.5 +"1988-05-05",16.4 +"1988-05-06",14.5 +"1988-05-07",12.6 +"1988-05-08",13.6 +"1988-05-09",11.2 +"1988-05-10",11.0 +"1988-05-11",12.0 +"1988-05-12",6.8 +"1988-05-13",10.6 +"1988-05-14",13.1 +"1988-05-15",13.5 +"1988-05-16",11.7 +"1988-05-17",13.2 +"1988-05-18",12.0 +"1988-05-19",10.4 +"1988-05-20",10.0 +"1988-05-21",8.2 +"1988-05-22",9.4 +"1988-05-23",10.3 +"1988-05-24",8.1 +"1988-05-25",8.7 +"1988-05-26",12.6 +"1988-05-27",10.9 +"1988-05-28",8.7 +"1988-05-29",9.3 +"1988-05-30",6.3 +"1988-05-31",7.8 +"1988-06-01",10.0 +"1988-06-02",11.0 +"1988-06-03",11.1 +"1988-06-04",12.6 +"1988-06-05",10.2 +"1988-06-06",11.1 +"1988-06-07",8.7 +"1988-06-08",9.5 +"1988-06-09",9.7 +"1988-06-10",8.2 +"1988-06-11",5.0 +"1988-06-12",6.5 +"1988-06-13",12.1 +"1988-06-14",8.9 +"1988-06-15",6.1 +"1988-06-16",2.8 +"1988-06-17",3.7 +"1988-06-18",6.8 +"1988-06-19",6.6 +"1988-06-20",7.0 +"1988-06-21",7.3 +"1988-06-22",7.9 +"1988-06-23",10.6 +"1988-06-24",8.1 +"1988-06-25",6.7 +"1988-06-26",8.0 +"1988-06-27",10.0 +"1988-06-28",6.7 +"1988-06-29",9.4 +"1988-06-30",9.3 +"1988-07-01",6.0 +"1988-07-02",5.8 +"1988-07-03",4.9 +"1988-07-04",5.0 +"1988-07-05",8.4 +"1988-07-06",12.3 +"1988-07-07",13.0 +"1988-07-08",11.4 +"1988-07-09",6.8 +"1988-07-10",7.6 +"1988-07-11",12.4 +"1988-07-12",7.1 +"1988-07-13",7.5 +"1988-07-14",10.0 
+"1988-07-15",5.3 +"1988-07-16",6.3 +"1988-07-17",8.0 +"1988-07-18",8.3 +"1988-07-19",9.3 +"1988-07-20",9.5 +"1988-07-21",5.6 +"1988-07-22",7.0 +"1988-07-23",8.5 +"1988-07-24",8.5 +"1988-07-25",8.2 +"1988-07-26",8.5 +"1988-07-27",9.6 +"1988-07-28",9.7 +"1988-07-29",7.1 +"1988-07-30",8.4 +"1988-07-31",9.2 +"1988-08-01",9.8 +"1988-08-02",8.1 +"1988-08-03",9.4 +"1988-08-04",10.0 +"1988-08-05",5.1 +"1988-08-06",6.7 +"1988-08-07",6.9 +"1988-08-08",6.8 +"1988-08-09",8.6 +"1988-08-10",9.1 +"1988-08-11",3.9 +"1988-08-12",4.8 +"1988-08-13",8.4 +"1988-08-14",11.6 +"1988-08-15",12.1 +"1988-08-16",12.4 +"1988-08-17",10.0 +"1988-08-18",10.1 +"1988-08-19",9.7 +"1988-08-20",11.7 +"1988-08-21",7.9 +"1988-08-22",8.6 +"1988-08-23",7.7 +"1988-08-24",5.8 +"1988-08-25",8.7 +"1988-08-26",10.6 +"1988-08-27",6.7 +"1988-08-28",8.8 +"1988-08-29",9.7 +"1988-08-30",9.0 +"1988-08-31",11.8 +"1988-09-01",15.2 +"1988-09-02",10.0 +"1988-09-03",10.5 +"1988-09-04",5.5 +"1988-09-05",9.4 +"1988-09-06",8.8 +"1988-09-07",5.3 +"1988-09-08",13.0 +"1988-09-09",15.2 +"1988-09-10",13.2 +"1988-09-11",11.5 +"1988-09-12",6.8 +"1988-09-13",4.7 +"1988-09-14",5.2 +"1988-09-15",6.8 +"1988-09-16",10.7 +"1988-09-17",10.1 +"1988-09-18",10.0 +"1988-09-19",9.8 +"1988-09-20",5.5 +"1988-09-21",13.5 +"1988-09-22",16.6 +"1988-09-23",8.4 +"1988-09-24",8.2 +"1988-09-25",11.1 +"1988-09-26",10.8 +"1988-09-27",8.8 +"1988-09-28",10.8 +"1988-09-29",8.7 +"1988-09-30",12.4 +"1988-10-01",9.0 +"1988-10-02",13.5 +"1988-10-03",14.7 +"1988-10-04",10.9 +"1988-10-05",8.5 +"1988-10-06",6.0 +"1988-10-07",12.7 +"1988-10-08",11.1 +"1988-10-09",8.7 +"1988-10-10",12.3 +"1988-10-11",13.3 +"1988-10-12",5.6 +"1988-10-13",13.7 +"1988-10-14",8.5 +"1988-10-15",11.2 +"1988-10-16",8.7 +"1988-10-17",11.7 +"1988-10-18",12.5 +"1988-10-19",8.2 +"1988-10-20",15.6 +"1988-10-21",10.3 +"1988-10-22",11.4 +"1988-10-23",9.7 +"1988-10-24",6.3 +"1988-10-25",14.3 +"1988-10-26",11.3 +"1988-10-27",7.3 +"1988-10-28",12.8 +"1988-10-29",11.9 +"1988-10-30",14.3 +"1988-10-31",11.6 +"1988-11-01",13.2 +"1988-11-02",15.5 +"1988-11-03",14.1 +"1988-11-04",9.5 +"1988-11-05",7.2 +"1988-11-06",11.8 +"1988-11-07",16.8 +"1988-11-08",12.5 +"1988-11-09",9.4 +"1988-11-10",11.9 +"1988-11-11",10.3 +"1988-11-12",16.9 +"1988-11-13",17.5 +"1988-11-14",7.5 +"1988-11-15",8.6 +"1988-11-16",11.1 +"1988-11-17",11.5 +"1988-11-18",10.7 +"1988-11-19",15.7 +"1988-11-20",12.8 +"1988-11-21",13.0 +"1988-11-22",12.9 +"1988-11-23",14.3 +"1988-11-24",13.7 +"1988-11-25",12.1 +"1988-11-26",11.9 +"1988-11-27",11.8 +"1988-11-28",11.4 +"1988-11-29",10.3 +"1988-11-30",11.7 +"1988-12-01",12.0 +"1988-12-02",17.4 +"1988-12-03",16.8 +"1988-12-04",16.2 +"1988-12-05",13.0 +"1988-12-06",12.5 +"1988-12-07",12.4 +"1988-12-08",16.1 +"1988-12-09",20.2 +"1988-12-10",14.3 +"1988-12-11",11.0 +"1988-12-12",14.4 +"1988-12-13",15.7 +"1988-12-14",19.7 +"1988-12-15",20.7 +"1988-12-16",23.9 +"1988-12-17",16.6 +"1988-12-18",17.5 +"1988-12-19",14.9 +"1988-12-20",13.6 +"1988-12-21",11.9 +"1988-12-22",15.2 +"1988-12-23",17.3 +"1988-12-24",19.8 +"1988-12-25",15.8 +"1988-12-26",9.5 +"1988-12-27",12.9 +"1988-12-28",12.9 +"1988-12-29",14.8 +"1988-12-30",14.1 +"1989-01-01",14.3 +"1989-01-02",17.4 +"1989-01-03",18.5 +"1989-01-04",16.8 +"1989-01-05",11.5 +"1989-01-06",9.5 +"1989-01-07",12.2 +"1989-01-08",15.7 +"1989-01-09",16.3 +"1989-01-10",13.6 +"1989-01-11",12.6 +"1989-01-12",13.8 +"1989-01-13",12.1 +"1989-01-14",13.4 +"1989-01-15",17.3 +"1989-01-16",19.4 +"1989-01-17",16.6 +"1989-01-18",13.9 +"1989-01-19",13.1 +"1989-01-20",16.0 +"1989-01-21",14.5 
+"1989-01-22",15.0 +"1989-01-23",12.6 +"1989-01-24",12.5 +"1989-01-25",15.2 +"1989-01-26",16.2 +"1989-01-27",16.5 +"1989-01-28",20.1 +"1989-01-29",20.6 +"1989-01-30",16.9 +"1989-01-31",16.5 +"1989-02-01",16.1 +"1989-02-02",14.4 +"1989-02-03",16.3 +"1989-02-04",15.7 +"1989-02-05",14.2 +"1989-02-06",13.2 +"1989-02-07",16.8 +"1989-02-08",18.5 +"1989-02-09",16.7 +"1989-02-10",15.3 +"1989-02-11",15.9 +"1989-02-12",15.2 +"1989-02-13",17.5 +"1989-02-14",18.3 +"1989-02-15",19.4 +"1989-02-16",19.4 +"1989-02-17",19.5 +"1989-02-18",20.5 +"1989-02-19",15.7 +"1989-02-20",15.0 +"1989-02-21",16.1 +"1989-02-22",14.3 +"1989-02-23",13.0 +"1989-02-24",16.2 +"1989-02-25",17.7 +"1989-02-26",13.2 +"1989-02-27",15.8 +"1989-02-28",18.5 +"1989-03-01",20.4 +"1989-03-02",22.0 +"1989-03-03",19.7 +"1989-03-04",19.6 +"1989-03-05",20.3 +"1989-03-06",18.3 +"1989-03-07",18.9 +"1989-03-08",20.3 +"1989-03-09",21.4 +"1989-03-10",18.3 +"1989-03-11",17.8 +"1989-03-12",17.7 +"1989-03-13",12.8 +"1989-03-14",15.1 +"1989-03-15",15.0 +"1989-03-16",14.8 +"1989-03-17",12.0 +"1989-03-18",12.5 +"1989-03-19",15.0 +"1989-03-20",17.1 +"1989-03-21",17.3 +"1989-03-22",16.9 +"1989-03-23",16.5 +"1989-03-24",13.6 +"1989-03-25",13.2 +"1989-03-26",9.4 +"1989-03-27",9.5 +"1989-03-28",11.8 +"1989-03-29",10.4 +"1989-03-30",9.7 +"1989-03-31",12.6 +"1989-04-01",13.3 +"1989-04-02",15.1 +"1989-04-03",14.2 +"1989-04-04",14.2 +"1989-04-05",19.2 +"1989-04-06",12.6 +"1989-04-07",14.2 +"1989-04-08",11.9 +"1989-04-09",13.9 +"1989-04-10",13.5 +"1989-04-11",15.3 +"1989-04-12",13.9 +"1989-04-13",14.0 +"1989-04-14",12.9 +"1989-04-15",8.5 +"1989-04-16",11.4 +"1989-04-17",10.9 +"1989-04-18",12.0 +"1989-04-19",8.6 +"1989-04-20",9.0 +"1989-04-21",9.6 +"1989-04-22",10.2 +"1989-04-23",9.8 +"1989-04-24",8.3 +"1989-04-25",11.0 +"1989-04-26",11.9 +"1989-04-27",14.0 +"1989-04-28",15.8 +"1989-04-29",14.5 +"1989-04-30",13.2 +"1989-05-01",14.2 +"1989-05-02",14.6 +"1989-05-03",11.8 +"1989-05-04",14.4 +"1989-05-05",10.4 +"1989-05-06",10.3 +"1989-05-07",10.8 +"1989-05-08",10.5 +"1989-05-09",9.5 +"1989-05-10",12.5 +"1989-05-11",13.7 +"1989-05-12",12.7 +"1989-05-13",11.9 +"1989-05-14",11.4 +"1989-05-15",9.7 +"1989-05-16",8.3 +"1989-05-17",8.1 +"1989-05-18",11.7 +"1989-05-19",11.6 +"1989-05-20",7.4 +"1989-05-21",5.2 +"1989-05-22",11.0 +"1989-05-23",9.5 +"1989-05-24",9.2 +"1989-05-25",10.7 +"1989-05-26",9.0 +"1989-05-27",10.2 +"1989-05-28",10.3 +"1989-05-29",12.1 +"1989-05-30",13.2 +"1989-05-31",6.6 +"1989-06-01",2.3 +"1989-06-02",1.4 +"1989-06-03",2.1 +"1989-06-04",6.6 +"1989-06-05",8.9 +"1989-06-06",7.8 +"1989-06-07",9.0 +"1989-06-08",10.3 +"1989-06-09",7.9 +"1989-06-10",7.2 +"1989-06-11",8.6 +"1989-06-12",8.8 +"1989-06-13",6.2 +"1989-06-14",9.5 +"1989-06-15",10.2 +"1989-06-16",9.7 +"1989-06-17",11.2 +"1989-06-18",10.2 +"1989-06-19",10.1 +"1989-06-20",8.1 +"1989-06-21",6.6 +"1989-06-22",5.0 +"1989-06-23",4.7 +"1989-06-24",5.3 +"1989-06-25",4.5 +"1989-06-26",2.3 +"1989-06-27",1.4 +"1989-06-28",0.5 +"1989-06-29",2.4 +"1989-06-30",8.0 +"1989-07-01",6.0 +"1989-07-02",7.1 +"1989-07-03",9.7 +"1989-07-04",6.9 +"1989-07-05",5.3 +"1989-07-06",7.0 +"1989-07-07",6.2 +"1989-07-08",7.0 +"1989-07-09",9.7 +"1989-07-10",8.0 +"1989-07-11",8.5 +"1989-07-12",7.1 +"1989-07-13",7.5 +"1989-07-14",3.3 +"1989-07-15",1.8 +"1989-07-16",2.6 +"1989-07-17",5.3 +"1989-07-18",5.8 +"1989-07-19",5.8 +"1989-07-20",7.2 +"1989-07-21",5.3 +"1989-07-22",1.6 +"1989-07-23",3.1 +"1989-07-24",5.3 +"1989-07-25",7.7 +"1989-07-26",4.2 +"1989-07-27",5.5 +"1989-07-28",9.0 +"1989-07-29",11.2 +"1989-07-30",8.0 +"1989-07-31",7.6 
+"1989-08-01",3.7 +"1989-08-02",7.5 +"1989-08-03",8.1 +"1989-08-04",8.4 +"1989-08-05",7.1 +"1989-08-06",7.6 +"1989-08-07",7.6 +"1989-08-08",5.6 +"1989-08-09",7.0 +"1989-08-10",10.5 +"1989-08-11",7.3 +"1989-08-12",7.8 +"1989-08-13",5.8 +"1989-08-14",3.8 +"1989-08-15",5.8 +"1989-08-16",6.7 +"1989-08-17",6.6 +"1989-08-18",6.6 +"1989-08-19",9.0 +"1989-08-20",8.1 +"1989-08-21",5.1 +"1989-08-22",8.6 +"1989-08-23",7.0 +"1989-08-24",5.5 +"1989-08-25",7.4 +"1989-08-26",6.2 +"1989-08-27",4.2 +"1989-08-28",6.3 +"1989-08-29",7.0 +"1989-08-30",4.0 +"1989-08-31",8.0 +"1989-09-01",8.8 +"1989-09-02",8.8 +"1989-09-03",6.1 +"1989-09-04",8.6 +"1989-09-05",8.9 +"1989-09-06",7.8 +"1989-09-07",5.0 +"1989-09-08",7.0 +"1989-09-09",13.3 +"1989-09-10",7.9 +"1989-09-11",7.5 +"1989-09-12",8.3 +"1989-09-13",7.2 +"1989-09-14",6.5 +"1989-09-15",8.9 +"1989-09-16",7.4 +"1989-09-17",9.9 +"1989-09-18",9.3 +"1989-09-19",10.6 +"1989-09-20",8.6 +"1989-09-21",7.2 +"1989-09-22",12.6 +"1989-09-23",7.8 +"1989-09-24",6.3 +"1989-09-25",9.2 +"1989-09-26",5.8 +"1989-09-27",9.0 +"1989-09-28",5.0 +"1989-09-29",11.9 +"1989-09-30",13.4 +"1989-10-01",10.5 +"1989-10-02",6.2 +"1989-10-03",5.1 +"1989-10-04",9.5 +"1989-10-05",11.7 +"1989-10-06",9.2 +"1989-10-07",7.3 +"1989-10-08",9.7 +"1989-10-09",9.4 +"1989-10-10",10.0 +"1989-10-11",10.9 +"1989-10-12",11.0 +"1989-10-13",10.9 +"1989-10-14",8.0 +"1989-10-15",11.2 +"1989-10-16",7.5 +"1989-10-17",7.2 +"1989-10-18",13.2 +"1989-10-19",12.9 +"1989-10-20",9.4 +"1989-10-21",10.2 +"1989-10-22",9.5 +"1989-10-23",12.4 +"1989-10-24",10.2 +"1989-10-25",13.4 +"1989-10-26",11.6 +"1989-10-27",8.0 +"1989-10-28",9.0 +"1989-10-29",9.3 +"1989-10-30",13.5 +"1989-10-31",8.0 +"1989-11-01",8.1 +"1989-11-02",10.0 +"1989-11-03",8.5 +"1989-11-04",12.5 +"1989-11-05",15.0 +"1989-11-06",13.3 +"1989-11-07",11.0 +"1989-11-08",11.9 +"1989-11-09",8.3 +"1989-11-10",9.7 +"1989-11-11",11.3 +"1989-11-12",12.5 +"1989-11-13",9.4 +"1989-11-14",11.4 +"1989-11-15",13.2 +"1989-11-16",13.8 +"1989-11-17",16.0 +"1989-11-18",10.9 +"1989-11-19",11.9 +"1989-11-20",12.4 +"1989-11-21",13.2 +"1989-11-22",15.5 +"1989-11-23",21.6 +"1989-11-24",14.9 +"1989-11-25",14.4 +"1989-11-26",12.9 +"1989-11-27",13.1 +"1989-11-28",14.0 +"1989-11-29",17.9 +"1989-11-30",17.7 +"1989-12-01",16.3 +"1989-12-02",18.3 +"1989-12-03",13.7 +"1989-12-04",13.3 +"1989-12-05",10.6 +"1989-12-06",14.1 +"1989-12-07",16.0 +"1989-12-08",16.5 +"1989-12-09",14.1 +"1989-12-10",18.7 +"1989-12-11",16.2 +"1989-12-12",14.8 +"1989-12-13",12.6 +"1989-12-14",10.4 +"1989-12-15",12.2 +"1989-12-16",12.6 +"1989-12-17",12.1 +"1989-12-18",17.3 +"1989-12-19",16.4 +"1989-12-20",12.6 +"1989-12-21",12.3 +"1989-12-22",11.8 +"1989-12-23",12.0 +"1989-12-24",12.7 +"1989-12-25",16.4 +"1989-12-26",16.0 +"1989-12-27",13.3 +"1989-12-28",11.7 +"1989-12-29",10.4 +"1989-12-30",14.4 +"1989-12-31",12.7 +"1990-01-01",14.8 +"1990-01-02",13.3 +"1990-01-03",15.6 +"1990-01-04",14.5 +"1990-01-05",14.3 +"1990-01-06",15.3 +"1990-01-07",16.4 +"1990-01-08",14.8 +"1990-01-09",17.4 +"1990-01-10",18.8 +"1990-01-11",22.1 +"1990-01-12",19.0 +"1990-01-13",15.5 +"1990-01-14",15.8 +"1990-01-15",14.7 +"1990-01-16",10.7 +"1990-01-17",11.5 +"1990-01-18",15.0 +"1990-01-19",14.5 +"1990-01-20",14.5 +"1990-01-21",13.3 +"1990-01-22",14.3 +"1990-01-23",14.3 +"1990-01-24",20.5 +"1990-01-25",15.0 +"1990-01-26",17.1 +"1990-01-27",16.9 +"1990-01-28",16.9 +"1990-01-29",13.6 +"1990-01-30",16.4 +"1990-01-31",16.1 +"1990-02-01",12.0 +"1990-02-02",12.2 +"1990-02-03",14.8 +"1990-02-04",14.8 +"1990-02-05",14.4 +"1990-02-06",12.9 +"1990-02-07",13.4 
+"1990-02-08",15.9 +"1990-02-09",16.1 +"1990-02-10",17.6 +"1990-02-11",15.6 +"1990-02-12",15.0 +"1990-02-13",13.0 +"1990-02-14",14.1 +"1990-02-15",17.3 +"1990-02-16",15.7 +"1990-02-17",18.6 +"1990-02-18",12.7 +"1990-02-19",14.0 +"1990-02-20",13.7 +"1990-02-21",16.3 +"1990-02-22",20.0 +"1990-02-23",17.0 +"1990-02-24",15.2 +"1990-02-25",16.5 +"1990-02-26",16.5 +"1990-02-27",17.3 +"1990-02-28",19.1 +"1990-03-01",19.3 +"1990-03-02",17.3 +"1990-03-03",19.0 +"1990-03-04",19.8 +"1990-03-05",19.3 +"1990-03-06",17.2 +"1990-03-07",14.2 +"1990-03-08",10.3 +"1990-03-09",13.0 +"1990-03-10",15.3 +"1990-03-11",15.0 +"1990-03-12",12.1 +"1990-03-13",9.2 +"1990-03-14",11.0 +"1990-03-15",15.0 +"1990-03-16",11.6 +"1990-03-17",11.6 +"1990-03-18",15.1 +"1990-03-19",15.0 +"1990-03-20",13.6 +"1990-03-21",12.5 +"1990-03-22",14.3 +"1990-03-23",16.0 +"1990-03-24",17.4 +"1990-03-25",16.9 +"1990-03-26",18.0 +"1990-03-27",20.6 +"1990-03-28",14.2 +"1990-03-29",10.9 +"1990-03-30",11.9 +"1990-03-31",13.3 +"1990-04-01",15.3 +"1990-04-02",14.7 +"1990-04-03",11.0 +"1990-04-04",12.2 +"1990-04-05",14.2 +"1990-04-06",17.0 +"1990-04-07",15.8 +"1990-04-08",15.2 +"1990-04-09",15.1 +"1990-04-10",14.7 +"1990-04-11",18.5 +"1990-04-12",16.4 +"1990-04-13",18.4 +"1990-04-14",15.1 +"1990-04-15",9.9 +"1990-04-16",10.2 +"1990-04-17",12.6 +"1990-04-18",13.2 +"1990-04-19",11.5 +"1990-04-20",13.8 +"1990-04-21",14.5 +"1990-04-22",14.7 +"1990-04-23",11.2 +"1990-04-24",12.7 +"1990-04-25",13.7 +"1990-04-26",11.5 +"1990-04-27",10.4 +"1990-04-28",8.9 +"1990-04-29",11.1 +"1990-04-30",9.5 +"1990-05-01",13.0 +"1990-05-02",13.9 +"1990-05-03",12.6 +"1990-05-04",14.3 +"1990-05-05",16.0 +"1990-05-06",13.3 +"1990-05-07",7.0 +"1990-05-08",4.9 +"1990-05-09",6.9 +"1990-05-10",13.7 +"1990-05-11",10.6 +"1990-05-12",12.3 +"1990-05-13",11.1 +"1990-05-14",10.2 +"1990-05-15",9.5 +"1990-05-16",8.9 +"1990-05-17",13.4 +"1990-05-18",9.1 +"1990-05-19",9.4 +"1990-05-20",8.7 +"1990-05-21",5.8 +"1990-05-22",4.5 +"1990-05-23",7.2 +"1990-05-24",10.0 +"1990-05-25",10.5 +"1990-05-26",10.7 +"1990-05-27",8.2 +"1990-05-28",6.1 +"1990-05-29",4.5 +"1990-05-30",6.1 +"1990-05-31",9.8 +"1990-06-01",9.7 +"1990-06-02",8.2 +"1990-06-03",8.4 +"1990-06-04",8.5 +"1990-06-05",10.4 +"1990-06-06",6.8 +"1990-06-07",6.0 +"1990-06-08",6.6 +"1990-06-09",7.8 +"1990-06-10",10.3 +"1990-06-11",7.2 +"1990-06-12",7.4 +"1990-06-13",11.4 +"1990-06-14",5.4 +"1990-06-15",4.4 +"1990-06-16",6.4 +"1990-06-17",9.3 +"1990-06-18",7.7 +"1990-06-19",8.1 +"1990-06-20",8.3 +"1990-06-21",9.1 +"1990-06-22",7.7 +"1990-06-23",10.6 +"1990-06-24",8.2 +"1990-06-25",7.9 +"1990-06-26",5.2 +"1990-06-27",5.9 +"1990-06-28",3.7 +"1990-06-29",5.6 +"1990-06-30",9.4 +"1990-07-01",7.4 +"1990-07-02",7.3 +"1990-07-03",7.7 +"1990-07-04",7.7 +"1990-07-05",9.3 +"1990-07-06",4.4 +"1990-07-07",5.7 +"1990-07-08",10.2 +"1990-07-09",10.2 +"1990-07-10",9.3 +"1990-07-11",5.4 +"1990-07-12",5.0 +"1990-07-13",7.6 +"1990-07-14",9.6 +"1990-07-15",10.4 +"1990-07-16",11.2 +"1990-07-17",9.1 +"1990-07-18",11.2 +"1990-07-19",6.8 +"1990-07-20",8.3 +"1990-07-21",9.7 +"1990-07-22",9.6 +"1990-07-23",9.8 +"1990-07-24",10.8 +"1990-07-25",9.2 +"1990-07-26",6.5 +"1990-07-27",8.1 +"1990-07-28",7.3 +"1990-07-29",7.9 +"1990-07-30",6.0 +"1990-07-31",5.0 +"1990-08-01",6.8 +"1990-08-02",9.8 +"1990-08-03",5.7 +"1990-08-04",8.6 +"1990-08-05",10.6 +"1990-08-06",7.8 +"1990-08-07",7.7 +"1990-08-08",8.6 +"1990-08-09",6.5 +"1990-08-10",6.9 +"1990-08-11",6.4 +"1990-08-12",8.5 +"1990-08-13",7.8 +"1990-08-14",9.3 +"1990-08-15",8.4 +"1990-08-16",7.8 +"1990-08-17",7.4 
+"1990-08-18",7.7 +"1990-08-19",8.9 +"1990-08-20",9.7 +"1990-08-21",9.9 +"1990-08-22",6.1 +"1990-08-23",6.6 +"1990-08-24",7.6 +"1990-08-25",7.4 +"1990-08-26",8.0 +"1990-08-27",2.1 +"1990-08-28",5.9 +"1990-08-29",11.6 +"1990-08-30",8.6 +"1990-08-31",7.9 +"1990-09-01",6.0 +"1990-09-02",9.5 +"1990-09-03",8.6 +"1990-09-04",7.6 +"1990-09-05",10.4 +"1990-09-06",10.3 +"1990-09-07",7.5 +"1990-09-08",3.0 +"1990-09-09",5.3 +"1990-09-10",10.5 +"1990-09-11",14.6 +"1990-09-12",12.6 +"1990-09-13",9.8 +"1990-09-14",7.2 +"1990-09-15",10.1 +"1990-09-16",10.4 +"1990-09-17",3.7 +"1990-09-18",7.3 +"1990-09-19",11.6 +"1990-09-20",16.3 +"1990-09-21",9.6 +"1990-09-22",6.8 +"1990-09-23",5.2 +"1990-09-24",10.6 +"1990-09-25",16.3 +"1990-09-26",9.8 +"1990-09-27",4.6 +"1990-09-28",11.1 +"1990-09-29",8.7 +"1990-09-30",10.0 +"1990-10-01",11.3 +"1990-10-02",10.5 +"1990-10-03",9.9 +"1990-10-04",11.0 +"1990-10-05",14.0 +"1990-10-06",9.2 +"1990-10-07",9.8 +"1990-10-08",6.0 +"1990-10-09",9.8 +"1990-10-10",9.2 +"1990-10-11",11.8 +"1990-10-12",10.3 +"1990-10-13",7.5 +"1990-10-14",7.7 +"1990-10-15",15.8 +"1990-10-16",14.6 +"1990-10-17",10.5 +"1990-10-18",11.3 +"1990-10-19",10.9 +"1990-10-20",6.4 +"1990-10-21",10.9 +"1990-10-22",9.0 +"1990-10-23",10.9 +"1990-10-24",12.4 +"1990-10-25",11.6 +"1990-10-26",13.3 +"1990-10-27",14.4 +"1990-10-28",18.4 +"1990-10-29",13.6 +"1990-10-30",14.9 +"1990-10-31",14.8 +"1990-11-01",15.4 +"1990-11-02",11.8 +"1990-11-03",13.0 +"1990-11-04",11.1 +"1990-11-05",12.5 +"1990-11-06",18.3 +"1990-11-07",19.2 +"1990-11-08",15.4 +"1990-11-09",13.1 +"1990-11-10",11.5 +"1990-11-11",8.6 +"1990-11-12",12.6 +"1990-11-13",13.8 +"1990-11-14",14.6 +"1990-11-15",13.2 +"1990-11-16",12.3 +"1990-11-17",8.8 +"1990-11-18",10.7 +"1990-11-19",9.9 +"1990-11-20",8.3 +"1990-11-21",15.0 +"1990-11-22",12.2 +"1990-11-23",10.5 +"1990-11-24",11.1 +"1990-11-25",13.0 +"1990-11-26",12.9 +"1990-11-27",8.8 +"1990-11-28",14.7 +"1990-11-29",14.7 +"1990-11-30",12.7 +"1990-12-01",13.3 +"1990-12-02",13.2 +"1990-12-03",16.2 +"1990-12-04",17.3 +"1990-12-05",20.5 +"1990-12-06",20.2 +"1990-12-07",19.4 +"1990-12-08",15.5 +"1990-12-09",14.1 +"1990-12-10",11.0 +"1990-12-11",11.1 +"1990-12-12",14.0 +"1990-12-13",11.4 +"1990-12-14",12.5 +"1990-12-15",13.4 +"1990-12-16",13.6 +"1990-12-17",13.9 +"1990-12-18",17.2 +"1990-12-19",14.7 +"1990-12-20",15.4 +"1990-12-21",13.1 +"1990-12-22",13.2 +"1990-12-23",13.9 +"1990-12-24",10.0 +"1990-12-25",12.9 +"1990-12-26",14.6 +"1990-12-27",14.0 +"1990-12-28",13.6 +"1990-12-29",13.5 +"1990-12-30",15.7 +"1990-12-31",13.0 + +Daily minimum temperatures in Melbourne, Australia, 1981-1990 + diff --git a/tf2.0/exercises.txt b/tf2.0/exercises.txt new file mode 100644 index 00000000..6fdee299 --- /dev/null +++ b/tf2.0/exercises.txt @@ -0,0 +1,36 @@ +Logistic Regression +https://www.kaggle.com/uciml/pima-indians-diabetes-database +https://lazyprogrammer.me/course_files/exercises/diabetes.csv + +Linear Regression +https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html +https://lazyprogrammer.me/course_files/exercises/boston.txt + +ANN +https://archive.ics.uci.edu/ml/datasets/ecoli (orig) +https://www.kaggle.com/elikplim/ecoli-data-set (alt) +https://lazyprogrammer.me/course_files/exercises/ecoli.csv + +CNN +https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge +https://archive.org/download/fer2013_202311/fer2013.csv + +RNN +Find your own stock price dataset! 
+ +NLP +https://www.kaggle.com/crowdflower/twitter-airline-sentiment +https://lazyprogrammer.me/course_files/exercises/AirlineSentimentTweets.csv + +Recommender Systems +http://www2.informatik.uni-freiburg.de/~cziegler/BX/ +http://lazyprogrammer.me/course_files/exercises/BX-CSV-Dump.zip + +Transfer Learning +https://www.kaggle.com/c/dogs-vs-cats + +GAN +https://www.kaggle.com/c/dogs-vs-cats + +DeepRL +Find your own stock price dataset! \ No newline at end of file diff --git a/tf2.0/extra_reading.txt b/tf2.0/extra_reading.txt new file mode 100644 index 00000000..a23d273c --- /dev/null +++ b/tf2.0/extra_reading.txt @@ -0,0 +1,36 @@ +Gradient Descent: Convergence Analysis +http://www.stat.cmu.edu/~ryantibs/convexopt-F13/scribes/lec6.pdf + +Deep learning improved by biological activation functions +https://arxiv.org/pdf/1804.11237.pdf + +Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift +Sergey Ioffe, Christian Szegedy +https://arxiv.org/abs/1502.03167 + +Dropout: A Simple Way to Prevent Neural Networks from Overfitting +https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf + +Convolution arithmetic tutorial +https://theano-pymc.readthedocs.io/en/latest/tutorial/conv_arithmetic.html + +On the Practical Computational Power of Finite Precision RNNs for Language Recognition +https://arxiv.org/abs/1805.04908 + +Massive Exploration of Neural Machine Translation Architectures +https://arxiv.org/abs/1703.03906 + +Practical Deep Reinforcement Learning Approach for Stock Trading +https://arxiv.org/abs/1811.07522 + +Inceptionism: Going Deeper into Neural Networks +https://ai.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html + +The Loss Surfaces of Multilayer Networks +https://arxiv.org/pdf/1412.0233.pdf + +Tensorflow Developer Certificate Installation Guide +https://www.tensorflow.org/static/extras/cert/Setting_Up_TF_Developer_Certificate_Exam.pdf + +Tensorflow Developer Certificate Candidate Handbook +https://www.tensorflow.org/extras/cert/TF_Certificate_Candidate_Handbook.pdf diff --git a/tf2.0/fake_util.py b/tf2.0/fake_util.py new file mode 100644 index 00000000..49dd3a3f --- /dev/null +++ b/tf2.0/fake_util.py @@ -0,0 +1,4 @@ +# Used for an example only + +def my_useful_function(): + print("hello world") \ No newline at end of file diff --git a/tf2.0/keras_trader.py b/tf2.0/keras_trader.py new file mode 100644 index 00000000..18545989 --- /dev/null +++ b/tf2.0/keras_trader.py @@ -0,0 +1,421 @@ +import numpy as np +import pandas as pd + +# must do this BEFORE importing keras +import os +os.environ["KERAS_BACKEND"] = "jax" + +from keras.models import Model +from keras.layers import Dense, Input +from keras.optimizers import Adam + +from datetime import datetime +import itertools +import argparse +import re +import pickle + +from sklearn.preprocessing import StandardScaler + + +import keras.backend as K +print("Using backend:", K.backend()) + +# import tensorflow as tf +# if tf.__version__.startswith('2'): +# tf.compat.v1.disable_eager_execution() + + + +# Let's use AAPL (Apple), MSI (Motorola), SBUX (Starbucks) +def get_data(): + # returns a T x 3 list of stock prices + # each row is a different stock + # 0 = AAPL + # 1 = MSI + # 2 = SBUX + df = pd.read_csv('aapl_msi_sbux.csv') + return df.values + + + +### The experience replay memory ### +class ReplayBuffer: + def __init__(self, obs_dim, act_dim, size): + self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32) + 
self.acts_buf = np.zeros(size, dtype=np.uint8) + self.rews_buf = np.zeros(size, dtype=np.float32) + self.done_buf = np.zeros(size, dtype=np.uint8) + self.ptr, self.size, self.max_size = 0, 0, size + + def store(self, obs, act, rew, next_obs, done): + self.obs1_buf[self.ptr] = obs + self.obs2_buf[self.ptr] = next_obs + self.acts_buf[self.ptr] = act + self.rews_buf[self.ptr] = rew + self.done_buf[self.ptr] = done + self.ptr = (self.ptr+1) % self.max_size + self.size = min(self.size+1, self.max_size) + + def sample_batch(self, batch_size=32): + idxs = np.random.randint(0, self.size, size=batch_size) + return dict(s=self.obs1_buf[idxs], + s2=self.obs2_buf[idxs], + a=self.acts_buf[idxs], + r=self.rews_buf[idxs], + d=self.done_buf[idxs]) + + + + + +def get_scaler(env): + # return scikit-learn scaler object to scale the states + # Note: you could also populate the replay buffer here + + states = [] + for _ in range(env.n_step): + action = np.random.choice(env.action_space) + state, reward, done, info = env.step(action) + states.append(state) + if done: + break + + scaler = StandardScaler() + scaler.fit(states) + return scaler + + + + +def maybe_make_dir(directory): + if not os.path.exists(directory): + os.makedirs(directory) + + + + +def mlp(input_dim, n_action, n_hidden_layers=1, hidden_dim=32): + """ A multi-layer perceptron """ + + # input layer + i = Input(shape=(input_dim,)) + x = i + + # hidden layers + for _ in range(n_hidden_layers): + x = Dense(hidden_dim, activation='relu')(x) + + # final layer + x = Dense(n_action)(x) + + # make the model + model = Model(i, x) + + model.compile(loss='mse', optimizer='adam') + print((model.summary())) + return model + + + + +class MultiStockEnv: + """ + A 3-stock trading environment. + State: vector of size 7 (n_stock * 2 + 1) + - # shares of stock 1 owned + - # shares of stock 2 owned + - # shares of stock 3 owned + - price of stock 1 (using daily close price) + - price of stock 2 + - price of stock 3 + - cash owned (can be used to purchase more stocks) + Action: categorical variable with 27 (3^3) possibilities + - for each stock, you can: + - 0 = sell + - 1 = hold + - 2 = buy + """ + def __init__(self, data, initial_investment=20000): + # data + self.stock_price_history = data + self.n_step, self.n_stock = self.stock_price_history.shape + + # instance attributes + self.initial_investment = initial_investment + self.cur_step = None + self.stock_owned = None + self.stock_price = None + self.cash_in_hand = None + + self.action_space = np.arange(3**self.n_stock) + + # action permutations + # returns a nested list with elements like: + # [0,0,0] + # [0,0,1] + # [0,0,2] + # [0,1,0] + # [0,1,1] + # etc. + # 0 = sell + # 1 = hold + # 2 = buy + self.action_list = list(map(list, itertools.product([0, 1, 2], repeat=self.n_stock))) + + # calculate size of state + self.state_dim = self.n_stock * 2 + 1 + + self.reset() + + + def reset(self): + self.cur_step = 0 + self.stock_owned = np.zeros(self.n_stock) + self.stock_price = self.stock_price_history[self.cur_step] + self.cash_in_hand = self.initial_investment + return self._get_obs() + + + def step(self, action): + assert action in self.action_space + + # get current value before performing the action + prev_val = self._get_val() + + # perform the trade + self._trade(action) + + # update price, i.e. 
go to the next day + self.cur_step += 1 + self.stock_price = self.stock_price_history[self.cur_step] + + # get the new value after taking the action + cur_val = self._get_val() + + # reward is the increase in portfolio value + reward = cur_val - prev_val + + # done if we have run out of data + done = self.cur_step == self.n_step - 1 + + # store the current value of the portfolio here + info = {'cur_val': cur_val} + + # conform to the Gym API + return self._get_obs(), reward, done, info + + + def _get_obs(self): + obs = np.empty(self.state_dim) + obs[:self.n_stock] = self.stock_owned + obs[self.n_stock:2*self.n_stock] = self.stock_price + obs[-1] = self.cash_in_hand + return obs + + + + def _get_val(self): + return self.stock_owned.dot(self.stock_price) + self.cash_in_hand + + + def _trade(self, action): + # index the action we want to perform + # 0 = sell + # 1 = hold + # 2 = buy + # e.g. [2,1,0] means: + # buy first stock + # hold second stock + # sell third stock + action_vec = self.action_list[action] + + # determine which stocks to buy or sell + sell_index = [] # stores index of stocks we want to sell + buy_index = [] # stores index of stocks we want to buy + for i, a in enumerate(action_vec): + if a == 0: + sell_index.append(i) + elif a == 2: + buy_index.append(i) + + # sell any stocks we want to sell + # then buy any stocks we want to buy + if sell_index: + # NOTE: to simplify the problem, when we sell, we will sell ALL shares of that stock + for i in sell_index: + self.cash_in_hand += self.stock_price[i] * self.stock_owned[i] + self.stock_owned[i] = 0 + if buy_index: + # NOTE: when buying, we will loop through each stock we want to buy, + # and buy one share at a time until we run out of cash + can_buy = True + while can_buy: + for i in buy_index: + if self.cash_in_hand > self.stock_price[i]: + self.stock_owned[i] += 1 # buy one share + self.cash_in_hand -= self.stock_price[i] + else: + can_buy = False + + + + + +class DQNAgent(object): + def __init__(self, state_size, action_size): + self.state_size = state_size + self.action_size = action_size + self.memory = ReplayBuffer(state_size, action_size, size=500) + self.gamma = 0.95 # discount rate + self.epsilon = 1.0 # exploration rate + self.epsilon_min = 0.01 + self.epsilon_decay = 0.995 + self.model = mlp(state_size, action_size) + + + def update_replay_memory(self, state, action, reward, next_state, done): + self.memory.store(state, action, reward, next_state, done) + + + def act(self, state): + if np.random.rand() <= self.epsilon: + return np.random.choice(self.action_size) + act_values = self.model.predict(state, verbose=0) + return np.argmax(act_values[0]) # returns action + + + def replay(self, batch_size=32): + # first check if replay buffer contains enough data + if self.memory.size < batch_size: + return + + # sample a batch of data from the replay memory + minibatch = self.memory.sample_batch(batch_size) + states = minibatch['s'] + actions = minibatch['a'] + rewards = minibatch['r'] + next_states = minibatch['s2'] + done = minibatch['d'] + + # Calculate the tentative target: Q(s',a) + target = rewards + (1 - done) * self.gamma * np.amax(self.model.predict(next_states, verbose=0), axis=1) + + # With the Keras API, the target (usually) must have the same + # shape as the predictions. + # However, we only need to update the network for the actions + # which were actually taken. + # We can accomplish this by setting the target to be equal to + # the prediction for all values.
+ # Then, only change the targets for the actions taken. + # Q(s,a) + target_full = self.model.predict(states, verbose=0) + target_full[np.arange(batch_size), actions] = target + + # Run one training step + self.model.train_on_batch(states, target_full) + + if self.epsilon > self.epsilon_min: + self.epsilon *= self.epsilon_decay + + + def load(self, name): + self.model.load_weights(name) + + + def save(self, name): + self.model.save_weights(name) + + + +def play_one_episode(agent, env, is_train): + # note: after transforming states are already 1xD + state = env.reset() + state = scaler.transform([state]) + done = False + + while not done: + action = agent.act(state) + next_state, reward, done, info = env.step(action) + next_state = scaler.transform([next_state]) + if is_train == 'train': + agent.update_replay_memory(state, action, reward, next_state, done) + agent.replay(batch_size) + state = next_state + + return info['cur_val'] + + + +if __name__ == '__main__': + + # config + models_folder = 'rl_trader_models' + rewards_folder = 'rl_trader_rewards' + model_file = 'dqn.weights.h5' + num_episodes = 2000 + batch_size = 32 + initial_investment = 20000 + + + parser = argparse.ArgumentParser() + parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') + args = parser.parse_args() + + maybe_make_dir(models_folder) + maybe_make_dir(rewards_folder) + + data = get_data() + n_timesteps, n_stocks = data.shape + + n_train = n_timesteps // 2 + + train_data = data[:n_train] + test_data = data[n_train:] + + env = MultiStockEnv(train_data, initial_investment) + state_size = env.state_dim + action_size = len(env.action_space) + agent = DQNAgent(state_size, action_size) + scaler = get_scaler(env) + + # store the final value of the portfolio (end of episode) + portfolio_value = [] + + if args.mode == 'test': + # then load the previous scaler + with open(f'{models_folder}/scaler.pkl', 'rb') as f: + scaler = pickle.load(f) + + # remake the env with test data + env = MultiStockEnv(test_data, initial_investment) + + # make sure epsilon is not 1! 
+ # no need to run multiple episodes if epsilon = 0, it's deterministic + agent.epsilon = 0.01 + + # load trained weights + agent.load(f'{models_folder}/{model_file}') + + # play the game num_episodes times + for e in range(num_episodes): + t0 = datetime.now() + val = play_one_episode(agent, env, args.mode) + dt = datetime.now() - t0 + print(f"episode: {e + 1}/{num_episodes}, episode end value: {val:.2f}, duration: {dt}") + portfolio_value.append(val) # append episode end portfolio value + + # save the weights when we are done + if args.mode == 'train': + # save the DQN + agent.save(f'{models_folder}/{model_file}') + + # save the scaler + with open(f'{models_folder}/scaler.pkl', 'wb') as f: + pickle.dump(scaler, f) + + + # save portfolio value for each episode + np.save(f'{rewards_folder}/{args.mode}.npy', portfolio_value) diff --git a/tf2.0/mlp_trader.py b/tf2.0/mlp_trader.py new file mode 100644 index 00000000..e5d53e56 --- /dev/null +++ b/tf2.0/mlp_trader.py @@ -0,0 +1,401 @@ +import numpy as np +import pandas as pd + +from sklearn.neural_network import MLPRegressor +from sklearn.preprocessing import StandardScaler + +from datetime import datetime +import itertools +import argparse +import re +import os +import pickle + + +# Let's use AAPL (Apple), MSI (Motorola), SBUX (Starbucks) +def get_data(): + # returns a T x 3 list of stock prices + # each row is a different stock + # 0 = AAPL + # 1 = MSI + # 2 = SBUX + df = pd.read_csv('aapl_msi_sbux.csv') + return df.values + + + +### The experience replay memory ### +class ReplayBuffer: + def __init__(self, obs_dim, act_dim, size): + self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.acts_buf = np.zeros(size, dtype=np.uint8) + self.rews_buf = np.zeros(size, dtype=np.float32) + self.done_buf = np.zeros(size, dtype=np.uint8) + self.ptr, self.size, self.max_size = 0, 0, size + + def store(self, obs, act, rew, next_obs, done): + self.obs1_buf[self.ptr] = obs + self.obs2_buf[self.ptr] = next_obs + self.acts_buf[self.ptr] = act + self.rews_buf[self.ptr] = rew + self.done_buf[self.ptr] = done + self.ptr = (self.ptr+1) % self.max_size + self.size = min(self.size+1, self.max_size) + + def sample_batch(self, batch_size=32): + idxs = np.random.randint(0, self.size, size=batch_size) + return dict(s=self.obs1_buf[idxs], + s2=self.obs2_buf[idxs], + a=self.acts_buf[idxs], + r=self.rews_buf[idxs], + d=self.done_buf[idxs]) + + + + + +def get_scaler(env): + # return scikit-learn scaler object to scale the states + # Note: you could also populate the replay buffer here + + states = [] + for _ in range(env.n_step): + action = np.random.choice(env.action_space) + state, reward, done, info = env.step(action) + states.append(state) + if done: + break + + scaler = StandardScaler() + scaler.fit(states) + return scaler + + + + +def maybe_make_dir(directory): + if not os.path.exists(directory): + os.makedirs(directory) + + + + +def mlp(input_dim, n_action, n_hidden_layers=1, hidden_dim=32): + """ A multi-layer perceptron """ + + model = MLPRegressor( + hidden_layer_sizes=n_hidden_layers * [hidden_dim], + ) + + # since we'll be first using this to make a prediction with random weights + # we need to know the output size + + # so we'll just start by fitting on some dummy data + X = np.random.randn(100, input_dim) + Y = np.random.randn(100, n_action) + model.partial_fit(X, Y) + + return model + + + + +class MultiStockEnv: + """ + A 3-stock trading environment. 
+ State: vector of size 7 (n_stock * 2 + 1) + - # shares of stock 1 owned + - # shares of stock 2 owned + - # shares of stock 3 owned + - price of stock 1 (using daily close price) + - price of stock 2 + - price of stock 3 + - cash owned (can be used to purchase more stocks) + Action: categorical variable with 27 (3^3) possibilities + - for each stock, you can: + - 0 = sell + - 1 = hold + - 2 = buy + """ + def __init__(self, data, initial_investment=20000): + # data + self.stock_price_history = data + self.n_step, self.n_stock = self.stock_price_history.shape + + # instance attributes + self.initial_investment = initial_investment + self.cur_step = None + self.stock_owned = None + self.stock_price = None + self.cash_in_hand = None + + self.action_space = np.arange(3**self.n_stock) + + # action permutations + # returns a nested list with elements like: + # [0,0,0] + # [0,0,1] + # [0,0,2] + # [0,1,0] + # [0,1,1] + # etc. + # 0 = sell + # 1 = hold + # 2 = buy + self.action_list = list(map(list, itertools.product([0, 1, 2], repeat=self.n_stock))) + + # calculate size of state + self.state_dim = self.n_stock * 2 + 1 + + self.reset() + + + def reset(self): + self.cur_step = 0 + self.stock_owned = np.zeros(self.n_stock) + self.stock_price = self.stock_price_history[self.cur_step] + self.cash_in_hand = self.initial_investment + return self._get_obs() + + + def step(self, action): + assert action in self.action_space + + # get current value before performing the action + prev_val = self._get_val() + + # perform the trade + self._trade(action) + + # update price, i.e. go to the next day + self.cur_step += 1 + self.stock_price = self.stock_price_history[self.cur_step] + + # get the new value after taking the action + cur_val = self._get_val() + + # reward is the increase in portfolio value + reward = cur_val - prev_val + + # done if we have run out of data + done = self.cur_step == self.n_step - 1 + + # store the current value of the portfolio here + info = {'cur_val': cur_val} + + # conform to the Gym API + return self._get_obs(), reward, done, info + + + def _get_obs(self): + obs = np.empty(self.state_dim) + obs[:self.n_stock] = self.stock_owned + obs[self.n_stock:2*self.n_stock] = self.stock_price + obs[-1] = self.cash_in_hand + return obs + + + + def _get_val(self): + return self.stock_owned.dot(self.stock_price) + self.cash_in_hand + + + def _trade(self, action): + # index the action we want to perform + # 0 = sell + # 1 = hold + # 2 = buy + # e.g.
[2,1,0] means: + # buy first stock + # hold second stock + # sell third stock + action_vec = self.action_list[action] + + # determine which stocks to buy or sell + sell_index = [] # stores index of stocks we want to sell + buy_index = [] # stores index of stocks we want to buy + for i, a in enumerate(action_vec): + if a == 0: + sell_index.append(i) + elif a == 2: + buy_index.append(i) + + # sell any stocks we want to sell + # then buy any stocks we want to buy + if sell_index: + # NOTE: to simplify the problem, when we sell, we will sell ALL shares of that stock + for i in sell_index: + self.cash_in_hand += self.stock_price[i] * self.stock_owned[i] + self.stock_owned[i] = 0 + if buy_index: + # NOTE: when buying, we will loop through each stock we want to buy, + # and buy one share at a time until we run out of cash + can_buy = True + while can_buy: + for i in buy_index: + if self.cash_in_hand > self.stock_price[i]: + self.stock_owned[i] += 1 # buy one share + self.cash_in_hand -= self.stock_price[i] + else: + can_buy = False + + + + + +class DQNAgent(object): + def __init__(self, state_size, action_size): + self.state_size = state_size + self.action_size = action_size + self.memory = ReplayBuffer(state_size, action_size, size=500) + self.gamma = 0.95 # discount rate + self.epsilon = 1.0 # exploration rate + self.epsilon_min = 0.01 + self.epsilon_decay = 0.995 + self.model = mlp(state_size, action_size) + + + def update_replay_memory(self, state, action, reward, next_state, done): + self.memory.store(state, action, reward, next_state, done) + + + def act(self, state): + if np.random.rand() <= self.epsilon: + return np.random.choice(self.action_size) + act_values = self.model.predict(state) + return np.argmax(act_values[0]) # returns action + + def replay(self, batch_size=32): + # first check if replay buffer contains enough data + if self.memory.size < batch_size: + return + + # sample a batch of data from the replay memory + minibatch = self.memory.sample_batch(batch_size) + states = minibatch['s'] + actions = minibatch['a'] + rewards = minibatch['r'] + next_states = minibatch['s2'] + done = minibatch['d'] + + # Calculate the tentative target: Q(s',a) + target = rewards + (1 - done) * self.gamma * np.amax(self.model.predict(next_states), axis=1) + + # With the Keras API, the target (usually) must have the same + # shape as the predictions. + # However, we only need to update the network for the actions + # which were actually taken. + # We can accomplish this by setting the target to be equal to + # the prediction for all values. + # Then, only change the targets for the actions taken. 
+ # Q(s,a) + target_full = self.model.predict(states) + target_full[np.arange(batch_size), actions] = target + + # Run one training step + self.model.partial_fit(states, target_full) + + if self.epsilon > self.epsilon_min: + self.epsilon *= self.epsilon_decay + + + def load(self, name): + with open(name, "rb") as f: + self.model = pickle.load(f) + + + def save(self, name): + with open(name, "wb") as f: + pickle.dump(self.model, f) + + +def play_one_episode(agent, env, is_train): + # note: after transforming states are already 1xD + state = env.reset() + state = scaler.transform([state]) + done = False + + while not done: + action = agent.act(state) + next_state, reward, done, info = env.step(action) + next_state = scaler.transform([next_state]) + if is_train == 'train': + agent.update_replay_memory(state, action, reward, next_state, done) + agent.replay(batch_size) + state = next_state + + return info['cur_val'] + + + +if __name__ == '__main__': + + # config + models_folder = 'rl_trader_models' + rewards_folder = 'rl_trader_rewards' + num_episodes = 2000 + batch_size = 32 + initial_investment = 20000 + + + parser = argparse.ArgumentParser() + parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') + args = parser.parse_args() + + maybe_make_dir(models_folder) + maybe_make_dir(rewards_folder) + + data = get_data() + n_timesteps, n_stocks = data.shape + + n_train = n_timesteps // 2 + + train_data = data[:n_train] + test_data = data[n_train:] + + env = MultiStockEnv(train_data, initial_investment) + state_size = env.state_dim + action_size = len(env.action_space) + agent = DQNAgent(state_size, action_size) + scaler = get_scaler(env) + + # store the final value of the portfolio (end of episode) + portfolio_value = [] + + if args.mode == 'test': + # then load the previous scaler + with open(f'{models_folder}/scaler.pkl', 'rb') as f: + scaler = pickle.load(f) + + # remake the env with test data + env = MultiStockEnv(test_data, initial_investment) + + # make sure epsilon is not 1! 
+ # no need to run multiple episodes if epsilon = 0, it's deterministic + agent.epsilon = 0.01 + + # load trained weights + agent.load(f'{models_folder}/mlp.pkl') + + # play the game num_episodes times + for e in range(num_episodes): + t0 = datetime.now() + val = play_one_episode(agent, env, args.mode) + dt = datetime.now() - t0 + print(f"episode: {e + 1}/{num_episodes}, episode end value: {val:.2f}, duration: {dt}") + portfolio_value.append(val) # append episode end portfolio value + + # save the weights when we are done + if args.mode == 'train': + # save the DQN + agent.save(f'{models_folder}/mlp.pkl') + + # save the scaler + with open(f'{models_folder}/scaler.pkl', 'wb') as f: + pickle.dump(scaler, f) + + + # save portfolio value for each episode + np.save(f'{rewards_folder}/{args.mode}.npy', portfolio_value) diff --git a/tf2.0/moore.csv b/tf2.0/moore.csv new file mode 100644 index 00000000..ea97403f --- /dev/null +++ b/tf2.0/moore.csv @@ -0,0 +1,162 @@ +1971,2300 +1972,3500 +1973,2500 +1973,2500 +1974,4100 +1974,4500 +1974,8000 +1975,3510 +1976,5000 +1976,8500 +1976,6500 +1978,9000 +1978,29000 +1979,17500 +1979,29000 +1979,68000 +1981,11500 +1982,55000 +1982,134000 +1983,22000 +1984,63000 +1984,190000 +1985,275000 +1985,25000 +1985,16000 +1986,110000 +1986,375000 +1986,30000 +1987,385000 +1987,730000 +1987,273000 +1987,553000 +1988,180000 +1988,250000 +1989,600000 +1989,1000000 +1989,1180235 +1989,310000 +1990,1200000 +1991,1350000 +1991,35000 +1992,600000 +1992,900000 +1993,2800000 +1993,3100000 +1994,578977 +1994,2500000 +1995,2500000 +1999,111000 +1995,5500000 +1996,4300000 +1997,10000000 +1997,7500000 +1997,8800000 +1998,7500000 +1999,9500000 +1999,13500000 +2000,21000000 +2000,21000000 +1999,27400000 +1999,21300000 +1999,22000000 +2000,42000000 +2001,191000000 +2001,45000000 +2002,55000000 +2004,112000000 +2004,400000000 +2005,169000000 +2006,184000000 +2005,228000000 +2006,362000000 +2007,540000000 +2008,47000000 +2003,54300000 +2003,105900000 +2002,220000000 +2005,165000000 +2005,250000000 +2006,291000000 +2007,169000000 +2003,410000000 +2008,600000000 +2009,760000000 +2011,1870000000 +2012,432000000 +2007,463000000 +2007,26000000 +2008,230000000 +2004,592000000 +2007,411000000 +2008,731000000 +2008,758000000 +2007,789000000 +2009,904000000 +2010,1000000000 +2012,2990000000 +2013,1000000000 +2011,1160000000 +2010,1170000000 +2010,1200000000 +2012,1200000000 +2012,1303000000 +2010,1400000000 +2012,1400000000 +2014,1400000000 +2006,1700000000 +2015,1750000000 +2013,1860000000 +2015,1900000000 +2008,1900000000 +2010,2000000000 +2014,2000000000 +2015,2000000000 +2015,3000000000 +2012,2100000000 +2011,2270000000 +2010,2300000000 +2014,2600000000 +2011,2600000000 +2012,2750000000 +2014,3000000000 +2016,3000000000 +2017,5300000000 +2017,5300000000 +2018,8500000000 +2012,3100000000 +2016,3200000000 +2016,3300000000 +2015,3990000000 +2013,4200000000 +2017,4300000000 +2014,4310000000 +2017,4800000000 +2017,4800000000 +2017,4800000000 +2012,5000000000 +2013,5000000000 +2014,5560000000 +2017,6100000000 +2018,6900000000 +2016,4000000000 +2018,6900000000 +2017,5500000000 +2018,5500000000 +2017,7000000000 +2015,7100000000 +2017,8000000000 +2016,7200000000 +2017,8000000000 +2016,8000000000 +2017,9700000000 +2017,250000000 +2015,10000000000 +2017,5450000000 +2018,10000000000 +2017,4300000000 +2017,18000000000 +2017,19200000000 +2018,8876000000 +2018,23600000000 +2018,9000000000 diff --git a/tf2.0/plot_rl_rewards.py b/tf2.0/plot_rl_rewards.py new file mode 100644 index 00000000..3eb8e171 --- 
/dev/null +++ b/tf2.0/plot_rl_rewards.py @@ -0,0 +1,22 @@ +import matplotlib.pyplot as plt +import numpy as np +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') +args = parser.parse_args() + +a = np.load(f'rl_trader_rewards/{args.mode}.npy') + +print(f"average reward: {a.mean():.2f}, min: {a.min():.2f}, max: {a.max():.2f}") + +if args.mode == 'train': + # show the training progress + plt.plot(a) +else: + # test - show a histogram of rewards + plt.hist(a, bins=20) + +plt.title(args.mode) +plt.show() \ No newline at end of file diff --git a/tf2.0/rl_trader.py b/tf2.0/rl_trader.py new file mode 100644 index 00000000..b5849494 --- /dev/null +++ b/tf2.0/rl_trader.py @@ -0,0 +1,414 @@ +import numpy as np +import pandas as pd + +from tensorflow.keras.models import Model +from tensorflow.keras.layers import Dense, Input +from tensorflow.keras.optimizers import Adam + +from datetime import datetime +import itertools +import argparse +import re +import os +import pickle + +from sklearn.preprocessing import StandardScaler + + +import tensorflow as tf +# if tf.__version__.startswith('2'): +# tf.compat.v1.disable_eager_execution() + + +# Let's use AAPL (Apple), MSI (Motorola), SBUX (Starbucks) +def get_data(): + # returns a T x 3 list of stock prices + # each row is a different stock + # 0 = AAPL + # 1 = MSI + # 2 = SBUX + df = pd.read_csv('aapl_msi_sbux.csv') + return df.values + + + +### The experience replay memory ### +class ReplayBuffer: + def __init__(self, obs_dim, act_dim, size): + self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.acts_buf = np.zeros(size, dtype=np.uint8) + self.rews_buf = np.zeros(size, dtype=np.float32) + self.done_buf = np.zeros(size, dtype=np.uint8) + self.ptr, self.size, self.max_size = 0, 0, size + + def store(self, obs, act, rew, next_obs, done): + self.obs1_buf[self.ptr] = obs + self.obs2_buf[self.ptr] = next_obs + self.acts_buf[self.ptr] = act + self.rews_buf[self.ptr] = rew + self.done_buf[self.ptr] = done + self.ptr = (self.ptr+1) % self.max_size + self.size = min(self.size+1, self.max_size) + + def sample_batch(self, batch_size=32): + idxs = np.random.randint(0, self.size, size=batch_size) + return dict(s=self.obs1_buf[idxs], + s2=self.obs2_buf[idxs], + a=self.acts_buf[idxs], + r=self.rews_buf[idxs], + d=self.done_buf[idxs]) + + + + + +def get_scaler(env): + # return scikit-learn scaler object to scale the states + # Note: you could also populate the replay buffer here + + states = [] + for _ in range(env.n_step): + action = np.random.choice(env.action_space) + state, reward, done, info = env.step(action) + states.append(state) + if done: + break + + scaler = StandardScaler() + scaler.fit(states) + return scaler + + + + +def maybe_make_dir(directory): + if not os.path.exists(directory): + os.makedirs(directory) + + + + +def mlp(input_dim, n_action, n_hidden_layers=1, hidden_dim=32): + """ A multi-layer perceptron """ + + # input layer + i = Input(shape=(input_dim,)) + x = i + + # hidden layers + for _ in range(n_hidden_layers): + x = Dense(hidden_dim, activation='relu')(x) + + # final layer + x = Dense(n_action)(x) + + # make the model + model = Model(i, x) + + model.compile(loss='mse', optimizer='adam') + print((model.summary())) + return model + + + + +class MultiStockEnv: + """ + A 3-stock trading environment. 
+  State: vector of size 7 (n_stock * 2 + 1)
+    - # shares of stock 1 owned
+    - # shares of stock 2 owned
+    - # shares of stock 3 owned
+    - price of stock 1 (using daily close price)
+    - price of stock 2
+    - price of stock 3
+    - cash owned (can be used to purchase more stocks)
+  Action: categorical variable with 27 (3^3) possibilities
+    - for each stock, you can:
+    - 0 = sell
+    - 1 = hold
+    - 2 = buy
+  """
+  def __init__(self, data, initial_investment=20000):
+    # data
+    self.stock_price_history = data
+    self.n_step, self.n_stock = self.stock_price_history.shape
+
+    # instance attributes
+    self.initial_investment = initial_investment
+    self.cur_step = None
+    self.stock_owned = None
+    self.stock_price = None
+    self.cash_in_hand = None
+
+    self.action_space = np.arange(3**self.n_stock)
+
+    # action permutations
+    # returns a nested list with elements like:
+    # [0,0,0]
+    # [0,0,1]
+    # [0,0,2]
+    # [0,1,0]
+    # [0,1,1]
+    # etc.
+    # 0 = sell
+    # 1 = hold
+    # 2 = buy
+    self.action_list = list(map(list, itertools.product([0, 1, 2], repeat=self.n_stock)))
+
+    # calculate size of state
+    self.state_dim = self.n_stock * 2 + 1
+
+    self.reset()
+
+
+  def reset(self):
+    self.cur_step = 0
+    self.stock_owned = np.zeros(self.n_stock)
+    self.stock_price = self.stock_price_history[self.cur_step]
+    self.cash_in_hand = self.initial_investment
+    return self._get_obs()
+
+
+  def step(self, action):
+    assert action in self.action_space
+
+    # get current value before performing the action
+    prev_val = self._get_val()
+
+    # perform the trade
+    self._trade(action)
+
+    # update price, i.e. go to the next day
+    self.cur_step += 1
+    self.stock_price = self.stock_price_history[self.cur_step]
+
+    # get the new value after taking the action
+    cur_val = self._get_val()
+
+    # reward is the increase in portfolio value
+    reward = cur_val - prev_val
+
+    # done if we have run out of data
+    done = self.cur_step == self.n_step - 1
+
+    # store the current value of the portfolio here
+    info = {'cur_val': cur_val}
+
+    # conform to the Gym API
+    return self._get_obs(), reward, done, info
+
+
+  def _get_obs(self):
+    obs = np.empty(self.state_dim)
+    obs[:self.n_stock] = self.stock_owned
+    obs[self.n_stock:2*self.n_stock] = self.stock_price
+    obs[-1] = self.cash_in_hand
+    return obs
+
+
+
+  def _get_val(self):
+    return self.stock_owned.dot(self.stock_price) + self.cash_in_hand
+
+
+  def _trade(self, action):
+    # index the action we want to perform
+    # 0 = sell
+    # 1 = hold
+    # 2 = buy
+    # e.g. [2,1,0] means:
+    # buy first stock
+    # hold second stock
+    # sell third stock
+    action_vec = self.action_list[action]
+
+    # determine which stocks to buy or sell
+    sell_index = [] # stores index of stocks we want to sell
+    buy_index = [] # stores index of stocks we want to buy
+    for i, a in enumerate(action_vec):
+      if a == 0:
+        sell_index.append(i)
+      elif a == 2:
+        buy_index.append(i)
+
+    # sell any stocks we want to sell
+    # then buy any stocks we want to buy
+    if sell_index:
+      # NOTE: to simplify the problem, when we sell, we will sell ALL shares of that stock
+      for i in sell_index:
+        self.cash_in_hand += self.stock_price[i] * self.stock_owned[i]
+        self.stock_owned[i] = 0
+    if buy_index:
+      # NOTE: when buying, we will loop through each stock we want to buy,
+      # and buy one share at a time until we run out of cash
+      can_buy = True
+      while can_buy:
+        for i in buy_index:
+          if self.cash_in_hand > self.stock_price[i]:
+            self.stock_owned[i] += 1 # buy one share
+            self.cash_in_hand -= self.stock_price[i]
+          else:
+            can_buy = False
+
+
+
+
+class DQNAgent(object):
+  def __init__(self, state_size, action_size):
+    self.state_size = state_size
+    self.action_size = action_size
+    self.memory = ReplayBuffer(state_size, action_size, size=500)
+    self.gamma = 0.95 # discount rate
+    self.epsilon = 1.0 # exploration rate
+    self.epsilon_min = 0.01
+    self.epsilon_decay = 0.995
+    self.model = mlp(state_size, action_size)
+
+
+  def update_replay_memory(self, state, action, reward, next_state, done):
+    self.memory.store(state, action, reward, next_state, done)
+
+
+  def act(self, state):
+    if np.random.rand() <= self.epsilon:
+      return np.random.choice(self.action_size)
+    act_values = self.model.predict(state, verbose=0)
+    return np.argmax(act_values[0]) # returns action
+
+
+  def replay(self, batch_size=32):
+    # first check if replay buffer contains enough data
+    if self.memory.size < batch_size:
+      return
+
+    # sample a batch of data from the replay memory
+    minibatch = self.memory.sample_batch(batch_size)
+    states = minibatch['s']
+    actions = minibatch['a']
+    rewards = minibatch['r']
+    next_states = minibatch['s2']
+    done = minibatch['d']
+
+    # Calculate the tentative target: Q(s',a)
+    target = rewards + (1 - done) * self.gamma * np.amax(self.model.predict(next_states, verbose=0), axis=1)
+
+    # With the Keras API, the target (usually) must have the same
+    # shape as the predictions.
+    # However, we only need to update the network for the actions
+    # which were actually taken.
+    # We can accomplish this by setting the target to be equal to
+    # the prediction for all values.
+    # Then, only change the targets for the actions taken.
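+    # Concretely: if row i of the batch took action a_i, then target_full[i]
+    # equals the model's own prediction everywhere except column a_i, so the
+    # MSE loss (and its gradient) is zero for every untaken action's output
+    # and only Q(s, a_i) is pushed toward the computed target.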
+ # Q(s,a) + target_full = self.model.predict(states, verbose=0) + target_full[np.arange(batch_size), actions] = target + + # Run one training step + self.model.train_on_batch(states, target_full) + + if self.epsilon > self.epsilon_min: + self.epsilon *= self.epsilon_decay + + + def load(self, name): + self.model.load_weights(name) + + + def save(self, name): + self.model.save_weights(name) + + + +def play_one_episode(agent, env, is_train): + # note: after transforming states are already 1xD + state = env.reset() + state = scaler.transform([state]) + done = False + + while not done: + action = agent.act(state) + next_state, reward, done, info = env.step(action) + next_state = scaler.transform([next_state]) + if is_train == 'train': + agent.update_replay_memory(state, action, reward, next_state, done) + agent.replay(batch_size) + state = next_state + + return info['cur_val'] + + + +if __name__ == '__main__': + + # config + models_folder = 'rl_trader_models' + rewards_folder = 'rl_trader_rewards' + model_file = 'dqn.weights.h5' + num_episodes = 2000 + batch_size = 32 + initial_investment = 20000 + + + parser = argparse.ArgumentParser() + parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') + args = parser.parse_args() + + maybe_make_dir(models_folder) + maybe_make_dir(rewards_folder) + + data = get_data() + n_timesteps, n_stocks = data.shape + + n_train = n_timesteps // 2 + + train_data = data[:n_train] + test_data = data[n_train:] + + env = MultiStockEnv(train_data, initial_investment) + state_size = env.state_dim + action_size = len(env.action_space) + agent = DQNAgent(state_size, action_size) + scaler = get_scaler(env) + + # store the final value of the portfolio (end of episode) + portfolio_value = [] + + if args.mode == 'test': + # then load the previous scaler + with open(f'{models_folder}/scaler.pkl', 'rb') as f: + scaler = pickle.load(f) + + # remake the env with test data + env = MultiStockEnv(test_data, initial_investment) + + # make sure epsilon is not 1! 
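+    # (epsilon = 1 would mean acting completely at random during the test)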
+ # no need to run multiple episodes if epsilon = 0, it's deterministic + agent.epsilon = 0.01 + + # load trained weights + agent.load(f'{models_folder}/{model_file}') + + # play the game num_episodes times + for e in range(num_episodes): + t0 = datetime.now() + val = play_one_episode(agent, env, args.mode) + dt = datetime.now() - t0 + print(f"episode: {e + 1}/{num_episodes}, episode end value: {val:.2f}, duration: {dt}") + portfolio_value.append(val) # append episode end portfolio value + + # save the weights when we are done + if args.mode == 'train': + # save the DQN + agent.save(f'{models_folder}/{model_file}') + + # save the scaler + with open(f'{models_folder}/scaler.pkl', 'wb') as f: + pickle.dump(scaler, f) + + + # save portfolio value for each episode + np.save(f'{rewards_folder}/{args.mode}.npy', portfolio_value) \ No newline at end of file diff --git a/tf2.0/sbux.csv b/tf2.0/sbux.csv new file mode 100644 index 00000000..05576b6e --- /dev/null +++ b/tf2.0/sbux.csv @@ -0,0 +1,1260 @@ +date,open,high,low,close,volume,Name +2013-02-08,27.92,28.325,27.92,28.185,7146296,SBUX +2013-02-11,28.26,28.26,27.93,28.07,5457354,SBUX +2013-02-12,28.0,28.275,27.975,28.13,8665592,SBUX +2013-02-13,28.23,28.23,27.75,27.915,7022056,SBUX +2013-02-14,27.765,27.905,27.675,27.775,8899188,SBUX +2013-02-15,27.805,27.85,27.085,27.17,18195730,SBUX +2013-02-19,27.18,27.305,27.01,27.225,11760912,SBUX +2013-02-20,27.3,27.42,26.59,26.655,12472506,SBUX +2013-02-21,26.535,26.82,26.26,26.675,13896450,SBUX +2013-02-22,26.85,27.105,26.64,27.085,11487316,SBUX +2013-02-25,27.2,27.355,26.6,26.605,12333954,SBUX +2013-02-26,26.715,26.93,26.425,26.64,10607724,SBUX +2013-02-27,26.625,27.4875,26.54,27.285,12056302,SBUX +2013-02-28,27.325,27.585,27.225,27.425,10394356,SBUX +2013-03-01,27.315,27.465,27.0,27.435,8451764,SBUX +2013-03-04,27.385,27.86,27.33,27.85,10193852,SBUX +2013-03-05,28.0,28.4,28.0,28.255,12931844,SBUX +2013-03-06,28.38,28.745,28.325,28.55,14925144,SBUX +2013-03-07,28.55,29.25,28.545,29.125,18237018,SBUX +2013-03-08,29.335,29.485,29.0725,29.335,14215718,SBUX +2013-03-11,29.2,29.465,29.165,29.305,9897766,SBUX +2013-03-12,29.225,29.275,28.99,29.14,11670100,SBUX +2013-03-13,29.165,29.4,29.14,29.2925,7435340,SBUX +2013-03-14,29.26,29.375,28.705,28.84,14723066,SBUX +2013-03-15,28.68,28.93,28.63,28.83,15102742,SBUX +2013-03-18,28.53,28.71,28.375,28.465,10521204,SBUX +2013-03-19,28.255,28.49,27.98,28.415,13337034,SBUX +2013-03-20,28.61,28.805,28.5,28.715,9620874,SBUX +2013-03-21,28.65,28.71,28.375,28.525,8307328,SBUX +2013-03-22,28.65,28.875,28.58,28.69,8720670,SBUX +2013-03-25,28.765,28.915,28.075,28.345,10580234,SBUX +2013-03-26,28.495,28.58,28.355,28.525,6128410,SBUX +2013-03-27,28.43,28.475,28.105,28.455,7456828,SBUX +2013-03-28,28.465,28.63,28.43,28.475,7620390,SBUX +2013-04-01,28.565,28.67,28.325,28.435,7009632,SBUX +2013-04-02,28.595,29.165,28.575,29.13,13495550,SBUX +2013-04-03,29.2,29.45,28.69,28.85,11272606,SBUX +2013-04-04,28.805,29.155,28.805,29.055,7568480,SBUX +2013-04-05,28.605,28.9382,28.3592,28.9,8993596,SBUX +2013-04-08,28.915,29.06,28.73,29.06,7343972,SBUX +2013-04-09,28.98,29.0,28.59,28.705,8361158,SBUX +2013-04-10,28.82,29.155,28.8045,28.9,8577388,SBUX +2013-04-11,28.975,29.495,28.9,29.2875,10416656,SBUX +2013-04-12,29.355,29.855,29.3,29.545,10418310,SBUX +2013-04-15,29.33,29.72,28.8,28.855,10700276,SBUX +2013-04-16,28.97,29.47,28.86,29.28,8849328,SBUX +2013-04-17,29.015,29.275,28.825,29.085,7207368,SBUX +2013-04-18,29.18,29.24,28.75,28.86,8776706,SBUX 
+2013-04-19,28.79,29.29,28.77,29.2025,8605504,SBUX +2013-04-22,29.21,29.435,28.99,29.32,5547302,SBUX +2013-04-23,29.42,29.95,29.3899,29.695,11323422,SBUX +2013-04-24,29.845,29.985,29.565,29.915,10410890,SBUX +2013-04-25,30.0,30.32,29.955,30.25,16550532,SBUX +2013-04-26,29.65,30.195,29.6,30.0,14970972,SBUX +2013-04-29,30.175,30.46,30.065,30.29,7400546,SBUX +2013-04-30,30.315,30.455,30.155,30.42,8041092,SBUX +2013-05-01,30.275,30.34,29.975,30.07,6294596,SBUX +2013-05-02,30.22,30.34,29.8,30.19,6237460,SBUX +2013-05-03,30.425,30.985,30.35,30.935,9835550,SBUX +2013-05-06,31.0,31.24,30.96,31.24,6997134,SBUX +2013-05-07,31.265,31.265,30.855,31.095,7495628,SBUX +2013-05-08,31.17,31.22,30.865,31.205,5507696,SBUX +2013-05-09,31.33,31.45,31.02,31.18,8276152,SBUX +2013-05-10,31.285,31.595,31.195,31.5485,7480820,SBUX +2013-05-13,31.6,31.615,31.305,31.41,5906892,SBUX +2013-05-14,31.46,31.785,31.39,31.76,7510580,SBUX +2013-05-15,31.6592,32.1,31.61,32.035,9654546,SBUX +2013-05-16,32.0,32.0325,31.725,31.775,6661036,SBUX +2013-05-17,31.885,32.07,31.75,32.065,6934282,SBUX +2013-05-20,32.015,32.305,31.89,31.915,7211790,SBUX +2013-05-21,32.02,32.23,31.94,32.125,6588946,SBUX +2013-05-22,32.075,32.465,31.98,32.075,11012050,SBUX +2013-05-23,31.845,31.91,31.515,31.76,8258348,SBUX +2013-05-24,31.54,31.7,31.305,31.68,6507242,SBUX +2013-05-28,32.105,32.35,32.02,32.13,7438516,SBUX +2013-05-29,31.895,32.05,31.5575,31.815,7560250,SBUX +2013-05-30,31.935,32.0221,31.7275,31.735,8798456,SBUX +2013-05-31,31.605,32.1725,31.395,31.57,10205308,SBUX +2013-06-03,31.63,31.77,31.31,31.73,11417578,SBUX +2013-06-04,31.305,32.07,31.305,31.665,8622564,SBUX +2013-06-05,31.52,31.65,31.155,31.17,8408538,SBUX +2013-06-06,31.175,31.5275,31.16,31.51,8625308,SBUX +2013-06-07,31.705,32.545,31.705,32.52,13752898,SBUX +2013-06-10,32.83,33.155,32.74,33.055,12412340,SBUX +2013-06-11,32.735,33.0525,32.655,32.71,7897496,SBUX +2013-06-12,32.91,33.0,32.13,32.225,10069920,SBUX +2013-06-13,32.27,33.095,32.035,32.985,11909482,SBUX +2013-06-14,32.975,33.335,32.725,32.8,9854434,SBUX +2013-06-17,33.095,33.28,32.8575,33.015,8115800,SBUX +2013-06-18,33.0,33.565,32.975,33.5475,7451352,SBUX +2013-06-19,33.51,33.74,33.205,33.205,10644698,SBUX +2013-06-20,32.91,33.16,32.52,32.61,11606540,SBUX +2013-06-21,32.855,32.96,32.115,32.345,14709502,SBUX +2013-06-24,32.0,32.289,31.59,32.005,9913028,SBUX +2013-06-25,32.265,32.68,32.22,32.37,9856852,SBUX +2013-06-26,32.685,33.3546,32.4455,32.9,9545442,SBUX +2013-06-27,33.135,33.25,32.825,32.845,6292594,SBUX +2013-06-28,32.755,33.125,32.7,32.755,10836506,SBUX +2013-07-01,33.045,33.245,32.91,33.12,6692040,SBUX +2013-07-02,33.12,33.6,33.04,33.395,6765528,SBUX +2013-07-03,33.215,33.84,33.2,33.65,5635188,SBUX +2013-07-05,33.935,34.0,33.45,33.86,5189244,SBUX +2013-07-08,33.97,34.41,33.94,34.145,8029920,SBUX +2013-07-09,34.45,34.5,33.995,34.065,7459914,SBUX +2013-07-10,33.935,34.06,33.725,34.05,8090792,SBUX +2013-07-11,34.34,34.7575,34.175,34.67,8385798,SBUX +2013-07-12,34.62,34.86,34.39,34.86,9706492,SBUX +2013-07-15,34.845,34.95,34.765,34.83,6180564,SBUX +2013-07-16,34.82,34.86,34.54,34.76,6068146,SBUX +2013-07-17,34.87,34.9076,33.945,34.1,12147310,SBUX +2013-07-18,34.225,34.505,34.02,34.24,8537754,SBUX +2013-07-19,34.285,34.555,34.2,34.395,7196954,SBUX +2013-07-22,34.54,34.605,34.275,34.51,6559682,SBUX +2013-07-23,34.62,34.66,33.725,33.83,8465332,SBUX +2013-07-24,34.06,34.215,33.15,33.305,14033776,SBUX +2013-07-25,33.47,34.15,33.365,34.085,15971984,SBUX +2013-07-26,36.3,36.76,35.925,36.68,32293248,SBUX 
+2013-07-29,36.375,36.54,36.05,36.225,12801986,SBUX +2013-07-30,36.45,36.49,35.855,35.965,9808846,SBUX +2013-07-31,36.07,36.25,35.61,35.6445,11248506,SBUX +2013-08-01,36.135,36.865,36.04,36.74,12849582,SBUX +2013-08-02,36.75,37.135,36.58,37.115,9394576,SBUX +2013-08-05,37.11,37.135,36.855,36.985,7606188,SBUX +2013-08-06,36.83,36.925,36.39,36.4,7956060,SBUX +2013-08-07,36.18,36.575,36.0825,36.095,7068922,SBUX +2013-08-08,36.385,36.7,36.2,36.47,5923772,SBUX +2013-08-09,36.295,36.6154,36.26,36.4,5366324,SBUX +2013-08-12,36.14,36.565,36.125,36.465,5473182,SBUX +2013-08-13,36.545,36.545,35.9674,36.32,5377794,SBUX +2013-08-14,36.29,36.29,35.895,35.925,4932052,SBUX +2013-08-15,35.655,35.675,35.155,35.37,7198878,SBUX +2013-08-16,35.49,35.695,35.275,35.355,6929830,SBUX +2013-08-19,35.29,35.6475,35.09,35.145,6349108,SBUX +2013-08-20,35.18,35.5125,35.0558,35.33,6046820,SBUX +2013-08-21,35.21,35.7,35.14,35.3565,6917088,SBUX +2013-08-22,35.54,36.13,35.505,35.95,5399676,SBUX +2013-08-23,36.0325,36.095,35.79,35.985,5869740,SBUX +2013-08-26,36.035,36.105,35.8,35.94,4928868,SBUX +2013-08-27,35.45,35.685,35.055,35.08,8021076,SBUX +2013-08-28,35.045,35.66,34.93,35.48,6249812,SBUX +2013-08-29,35.365,35.92,35.25,35.59,5509920,SBUX +2013-08-30,35.69,35.7,35.165,35.26,5666988,SBUX +2013-09-03,35.85,36.07,35.595,35.8,7249600,SBUX +2013-09-04,35.71,36.135,35.5455,36.07,6502234,SBUX +2013-09-05,36.125,36.38,36.005,36.025,4931220,SBUX +2013-09-06,36.185,36.325,35.465,35.785,6356670,SBUX +2013-09-09,35.84,36.245,35.835,36.22,4936828,SBUX +2013-09-10,36.49,37.16,36.465,37.1075,11779224,SBUX +2013-09-11,37.165,37.7499,37.165,37.695,9982644,SBUX +2013-09-12,37.75,37.955,37.5603,37.835,7628560,SBUX +2013-09-13,37.795,37.85,37.325,37.785,5767336,SBUX +2013-09-16,38.285,38.3,37.43,37.62,8787132,SBUX +2013-09-17,37.63,38.12,37.585,38.02,6661016,SBUX +2013-09-18,37.94,38.81,37.775,38.665,9488762,SBUX +2013-09-19,38.76,38.9225,38.15,38.175,8042062,SBUX +2013-09-20,38.13,38.5,38.035,38.06,9620500,SBUX +2013-09-23,38.055,38.19,37.535,37.68,6783882,SBUX +2013-09-24,37.765,38.655,37.739000000000004,38.275,7633976,SBUX +2013-09-25,38.42,38.505,38.0352,38.17,7261490,SBUX +2013-09-26,38.32,38.6395,38.16,38.59,4988814,SBUX +2013-09-27,38.455,38.74,38.2,38.665,6200278,SBUX +2013-09-30,38.25,38.5425,38.025,38.485,8828424,SBUX +2013-10-01,38.49,38.67,38.28,38.58,5205160,SBUX +2013-10-02,38.295,38.615,38.105,38.595,6088422,SBUX +2013-10-03,38.48,38.73,38.16,38.435,8164122,SBUX +2013-10-04,38.4,38.73,38.255,38.7,5226696,SBUX +2013-10-07,38.3,38.695,38.205,38.4305,5106784,SBUX +2013-10-08,38.555,39.015,37.72,37.765,12663250,SBUX +2013-10-09,37.695,37.9,37.2263,37.63,9482078,SBUX +2013-10-10,38.145,38.655,38.065,38.56,6638096,SBUX +2013-10-11,38.61,38.92,38.44,38.91,5530376,SBUX +2013-10-14,38.7675,39.16,38.625,39.05,5986662,SBUX +2013-10-15,38.915,38.9475,38.3,38.355,8146874,SBUX +2013-10-16,38.6,39.1125,38.445,39.02,10103938,SBUX +2013-10-17,38.835,39.425,38.725,39.3675,6562580,SBUX +2013-10-18,39.635,39.835,39.485,39.655,9049968,SBUX +2013-10-21,39.555,39.955,39.335,39.73,6770290,SBUX +2013-10-22,39.95,40.54,39.8,40.45,8419570,SBUX +2013-10-23,40.225,40.425,39.945,40.025,7908094,SBUX +2013-10-24,39.68,39.72,39.255,39.525,13508366,SBUX +2013-10-25,39.735,40.0,39.635,39.98,7251840,SBUX +2013-10-28,40.1,40.215,39.2625,39.355,11242732,SBUX +2013-10-29,39.605,39.81,39.525,39.81,7898436,SBUX +2013-10-30,40.315,40.425,39.88,40.415,17518716,SBUX +2013-10-31,39.345,40.81,39.2859,40.525,20491552,SBUX 
+2013-11-01,40.77,40.77,39.835,40.185,12005206,SBUX +2013-11-04,40.375,40.39,40.105,40.185,8285184,SBUX +2013-11-05,40.18,41.17,40.075,40.995,10092844,SBUX +2013-11-06,41.195,41.25,40.485,40.565,8562294,SBUX +2013-11-07,40.65,40.675,39.5,39.535,13283288,SBUX +2013-11-08,39.755,40.625,39.7,40.6,11487736,SBUX +2013-11-11,40.6,40.71,40.49,40.495,5146208,SBUX +2013-11-12,40.38,40.5249,40.06,40.3075,6613072,SBUX +2013-11-13,39.62,40.75,39.595,40.7305,12519058,SBUX +2013-11-14,40.765,40.77,40.3925,40.57,7647688,SBUX +2013-11-15,40.625,40.725,40.39,40.595,6445400,SBUX +2013-11-18,40.51,40.67,40.105,40.27,8322628,SBUX +2013-11-19,40.005,40.46,39.93,39.96,8725944,SBUX +2013-11-20,40.065,40.125,39.66,39.845,8734150,SBUX +2013-11-21,39.9,40.825,39.875,40.765,13139094,SBUX +2013-11-22,40.85,40.85,40.55,40.675,7926170,SBUX +2013-11-25,40.685,40.74,40.255,40.355,9001126,SBUX +2013-11-26,40.43,40.825,40.2025,40.755,8767864,SBUX +2013-11-27,40.695,40.955,40.6,40.81,4752000,SBUX +2013-11-29,40.925,41.185,40.685,40.73,4387144,SBUX +2013-12-02,40.745,40.8445,40.455,40.535,5773800,SBUX +2013-12-03,40.37,40.5775,40.145,40.275,7893264,SBUX +2013-12-04,40.195,40.36,39.72,39.75,10206454,SBUX +2013-12-05,39.78,40.125,39.665,39.86,6569194,SBUX +2013-12-06,40.2975,40.375,39.855,39.97,6736632,SBUX +2013-12-09,40.13,40.23,39.8025,39.865,7936162,SBUX +2013-12-10,39.495,39.535,38.34,38.69,26329504,SBUX +2013-12-11,38.8,38.9975,38.145,38.2,15751996,SBUX +2013-12-12,38.14,38.545,38.0,38.24,9651038,SBUX +2013-12-13,38.415,38.465,38.045,38.175,8052582,SBUX +2013-12-16,38.015,38.49,37.955,38.23,9348824,SBUX +2013-12-17,38.29,38.29,37.955,38.045,7008900,SBUX +2013-12-18,38.065,38.845,38.05,38.84,10268256,SBUX +2013-12-19,39.12,39.25,38.51,38.575,11406282,SBUX +2013-12-20,38.76,38.99,38.622,38.83,12707400,SBUX +2013-12-23,38.985,39.17,38.675,39.16,7624552,SBUX +2013-12-24,39.05,39.375,38.975,39.285,3896612,SBUX +2013-12-26,39.365,39.5125,39.2195,39.44,4391338,SBUX +2013-12-27,39.645,39.65,39.23,39.285,4506128,SBUX +2013-12-30,39.395,39.415,38.93,39.275,4734486,SBUX +2013-12-31,39.215,39.39,39.005,39.195,6016240,SBUX +2014-01-02,39.035,39.135,38.5025,38.585,8528022,SBUX +2014-01-03,38.74,38.885,38.47,38.475,6545626,SBUX +2014-01-06,38.45,38.675,38.005,38.085,10604900,SBUX +2014-01-07,38.325,38.705,38.235,38.605,8180398,SBUX +2014-01-08,38.64,39.0735,38.595,39.015,10190576,SBUX +2014-01-09,39.035,39.05,38.4,38.8,8370276,SBUX +2014-01-10,38.78,38.995,38.475,38.835,6475950,SBUX +2014-01-13,38.69,38.695,37.38,37.56,14730286,SBUX +2014-01-14,37.56,38.1775,37.375,37.73,18668208,SBUX +2014-01-15,37.77,38.165,37.705,38.095,8721564,SBUX +2014-01-16,37.91,38.0975,37.565,37.645,9110924,SBUX +2014-01-17,37.5,37.73,37.335,37.45,13007820,SBUX +2014-01-21,37.525,37.535,36.63,36.825,18777272,SBUX +2014-01-22,36.975,37.1,36.77,36.8,13989730,SBUX +2014-01-23,36.74,36.865,35.84,36.695,30451212,SBUX +2014-01-24,37.365,38.08,37.055,37.49,33218428,SBUX +2014-01-27,37.615,37.615,37.01,37.105,18212030,SBUX +2014-01-28,37.285,37.3975,36.835,36.945,11103896,SBUX +2014-01-29,36.75,36.83,35.73,35.78,16002076,SBUX +2014-01-30,36.14,36.215,35.655,35.955,15181598,SBUX +2014-01-31,35.445,35.9705,35.435,35.56,12287056,SBUX +2014-02-03,35.5,35.755,34.335,34.485,20353852,SBUX +2014-02-04,35.0,35.6,34.685,35.325,21066236,SBUX +2014-02-05,35.045,35.37,34.805,35.245,10968328,SBUX +2014-02-06,35.33,36.4,35.28,36.18,13819714,SBUX +2014-02-07,36.805,37.245,36.345,37.0175,14752770,SBUX +2014-02-10,37.155,37.59,37.155,37.4,12948390,SBUX 
+2014-02-11,37.435,37.595,36.895,37.25,16165852,SBUX +2014-02-12,37.245,37.53,36.85,36.955,9973052,SBUX +2014-02-13,36.78,37.345,36.695,37.345,8477256,SBUX +2014-02-14,37.175,37.545,37.005,37.515,8266438,SBUX +2014-02-18,37.495,37.5,36.9675,36.985,10967746,SBUX +2014-02-19,36.92,37.14,36.615,36.66,9802100,SBUX +2014-02-20,36.7,36.855,36.24,36.775,8553032,SBUX +2014-02-21,36.89,36.93,36.26,36.28,11298258,SBUX +2014-02-24,36.33,36.44,36.01,36.28,11953270,SBUX +2014-02-25,36.25,36.33,35.275,35.275,18641246,SBUX +2014-02-26,35.4,36.11,34.975,35.89,19156536,SBUX +2014-02-27,35.81,36.12,35.715,36.095,11298448,SBUX +2014-02-28,35.77,35.9663,35.25,35.48,22465506,SBUX +2014-03-03,35.005,35.365,35.0,35.235,12248352,SBUX +2014-03-04,35.715,35.98,35.525,35.83,12255104,SBUX +2014-03-05,35.995,36.045,35.44,35.65,9783410,SBUX +2014-03-06,35.915,36.6675,35.885,36.345,13781048,SBUX +2014-03-07,36.495,36.55,36.105,36.535,8409372,SBUX +2014-03-10,36.625,36.82,36.395,36.78,8648776,SBUX +2014-03-11,36.995,37.704,36.925,37.515,18368750,SBUX +2014-03-12,37.275,37.83,37.25,37.815,10450862,SBUX +2014-03-13,37.92,38.21,37.015,37.215,11380782,SBUX +2014-03-14,37.045,37.445,37.0118,37.135,8962602,SBUX +2014-03-17,37.405,37.495,36.91,37.09,11019894,SBUX +2014-03-18,37.175,37.42,37.025,37.3,5997520,SBUX +2014-03-19,37.5,38.665,37.41,37.955,24983140,SBUX +2014-03-20,38.145,38.565,37.8,38.4775,13851476,SBUX +2014-03-21,39.105,39.32,38.31,38.355,18036904,SBUX +2014-03-24,38.45,38.555,37.605,37.885,10018724,SBUX +2014-03-25,38.22,38.255,37.145,37.305,10568158,SBUX +2014-03-26,37.465,37.495,36.76,36.77,8371118,SBUX +2014-03-27,36.535,36.98,36.345,36.7,12675658,SBUX +2014-03-28,36.645,37.03,36.525,36.85,6582750,SBUX +2014-03-31,36.95,37.2,36.585,36.69,7721754,SBUX +2014-04-01,36.82,37.49,36.705,37.005,8741780,SBUX +2014-04-02,37.175,37.185,36.62,36.835,8275518,SBUX +2014-04-03,36.93,37.17,36.41,36.545,7165454,SBUX +2014-04-04,36.775,36.945,35.66,35.775,11708546,SBUX +2014-04-07,35.6,35.69,35.025,35.215,11089724,SBUX +2014-04-08,35.32,35.8475,35.25,35.74,10679090,SBUX +2014-04-09,35.815,36.34,35.6,36.24,9055586,SBUX +2014-04-10,36.37,36.37,35.085,35.11,13375470,SBUX +2014-04-11,34.895,35.065,34.34,34.365,16368844,SBUX +2014-04-14,34.815,35.02,34.35,34.655,12176480,SBUX +2014-04-15,34.98,35.05,33.965,34.445,15461760,SBUX +2014-04-16,34.825,35.44,34.58,35.395,11278644,SBUX +2014-04-17,35.285,35.695,35.07,35.075,9245966,SBUX +2014-04-21,35.155,35.3,34.925,35.24,6098442,SBUX +2014-04-22,35.265,35.6375,35.165,35.5745,8862690,SBUX +2014-04-23,35.73,35.735,35.11,35.195,8881548,SBUX +2014-04-24,35.76,36.02,35.055,35.545,15577016,SBUX +2014-04-25,36.0,36.125,35.47,35.725,18122130,SBUX +2014-04-28,35.945,36.01,34.9,35.465,10733556,SBUX +2014-04-29,35.59,35.725,35.1325,35.32,9830164,SBUX +2014-04-30,35.31,35.38,35.06,35.31,7283078,SBUX +2014-05-01,35.375,35.6,35.16,35.56,7919194,SBUX +2014-05-02,35.625,35.7625,35.285,35.3,8306490,SBUX +2014-05-05,35.25,35.5275,35.09,35.46,5486598,SBUX +2014-05-06,35.365,35.415,34.665,34.79,10926786,SBUX +2014-05-07,34.89,34.9975,34.57,34.87,10961996,SBUX +2014-05-08,34.8,35.2475,34.67,34.79,7531344,SBUX +2014-05-09,34.85,35.155,34.7612,35.145,6623136,SBUX +2014-05-12,35.32,35.62,35.2,35.575,7227204,SBUX +2014-05-13,35.485,35.67,35.45,35.58,5802916,SBUX +2014-05-14,35.59,35.6,35.015,35.085,8586022,SBUX +2014-05-15,35.13,35.165,34.64,34.925,9124686,SBUX +2014-05-16,35.0,35.52,34.905,35.47,9294826,SBUX +2014-05-19,35.28,35.56,35.14,35.51,6757810,SBUX +2014-05-20,35.38,35.448,34.9201,35.115,8699878,SBUX 
+2014-05-21,35.355,35.375,35.035,35.2,6089286,SBUX +2014-05-22,35.175,35.885,35.15,35.7,7359636,SBUX +2014-05-23,36.145,36.255,35.915,35.99,7166310,SBUX +2014-05-27,36.32,36.89,36.27,36.83,10100398,SBUX +2014-05-28,36.65,36.785,36.4575,36.635,8212030,SBUX +2014-05-29,36.76,36.78,36.325,36.555,6448878,SBUX +2014-05-30,36.58,36.75,36.265,36.62,6879534,SBUX +2014-06-02,36.61,37.0275,36.58,36.925,5926156,SBUX +2014-06-03,36.86,37.175,36.79,37.09,6768354,SBUX +2014-06-04,37.06,37.35,36.82,37.335,6495704,SBUX +2014-06-05,37.18,37.57,37.1,37.36,5188766,SBUX +2014-06-06,37.53,37.77,37.4,37.665,6204706,SBUX +2014-06-09,37.69,37.7,37.325,37.59,5701018,SBUX +2014-06-10,37.625,37.72,37.16,37.3,6640912,SBUX +2014-06-11,37.07,37.435,36.925,37.4,7510866,SBUX +2014-06-12,37.345,37.35,36.865,36.98,7193748,SBUX +2014-06-13,36.935,37.4475,36.765,37.345,7728298,SBUX +2014-06-16,37.24,37.58,37.23,37.545,6759676,SBUX +2014-06-17,37.515,37.825,37.495,37.655,5778600,SBUX +2014-06-18,37.6,37.8424,37.3,37.78,5786570,SBUX +2014-06-19,38.275,38.75,38.23,38.615,12555078,SBUX +2014-06-20,38.815,38.815,38.205,38.3,12446594,SBUX +2014-06-23,38.345,38.4,38.0955,38.365,4780230,SBUX +2014-06-24,38.45,38.875,38.27,38.715,10912302,SBUX +2014-06-25,38.52,39.09,38.48,39.06,7813834,SBUX +2014-06-26,39.005,39.165,38.735,39.03,6983266,SBUX +2014-06-27,38.98,39.175,38.895,38.97,8669268,SBUX +2014-06-30,39.05,39.095,38.54,38.69,9610248,SBUX +2014-07-01,38.945,39.1,38.8,39.04,8073888,SBUX +2014-07-02,39.19,39.2045,38.955,39.095,4794618,SBUX +2014-07-03,39.195,39.7,39.07,39.53,6662616,SBUX +2014-07-07,39.39,39.69,39.305,39.345,7443924,SBUX +2014-07-08,39.32,39.45,39.125,39.28,7802080,SBUX +2014-07-09,39.27,39.74,39.1801,39.725,7783444,SBUX +2014-07-10,39.305,39.525,39.095,39.425,4720762,SBUX +2014-07-11,39.48,39.495,39.07,39.3,4254768,SBUX +2014-07-14,39.49,39.49,39.21,39.28,4562120,SBUX +2014-07-15,39.325,39.575,39.23,39.445,8369728,SBUX +2014-07-16,39.53,39.53,39.155,39.365,8734258,SBUX +2014-07-17,39.06,39.23,38.56,38.62,8446350,SBUX +2014-07-18,38.85,39.075,38.625,38.97,6744520,SBUX +2014-07-21,38.93,38.965,38.585,38.805,5021858,SBUX +2014-07-22,39.165,39.515,39.1,39.37,6457244,SBUX +2014-07-23,39.255,39.6395,39.2,39.57,6450280,SBUX +2014-07-24,39.795,40.32,39.575,40.225,16129286,SBUX +2014-07-25,39.2,39.66,39.0,39.37,18984366,SBUX +2014-07-28,39.4,39.5,39.0865,39.18,8012924,SBUX +2014-07-29,39.245,39.625,39.205,39.325,7937248,SBUX +2014-07-30,39.5,39.59,39.26,39.45,8911708,SBUX +2014-07-31,39.26,39.35,38.76,38.84,8147584,SBUX +2014-08-01,38.75,38.945,38.29,38.49,7798186,SBUX +2014-08-04,38.63,38.88,38.515,38.765,6282674,SBUX +2014-08-05,38.585,38.73,38.215,38.395,6696400,SBUX +2014-08-06,38.25,38.72,38.135,38.565,5935218,SBUX +2014-08-07,38.665,38.72,38.28,38.355,5427964,SBUX +2014-08-08,38.315,38.835,38.255,38.81,5957988,SBUX +2014-08-11,38.835,39.125,38.825,38.935,4522400,SBUX +2014-08-12,38.935,39.0718,38.76,38.91,4723844,SBUX +2014-08-13,39.05,39.05,38.565,38.62,6935144,SBUX +2014-08-14,38.74,38.75,38.08,38.31,10892582,SBUX +2014-08-15,38.61,38.64,38.185,38.455,8090044,SBUX +2014-08-18,38.595,38.93,38.59,38.795,6845416,SBUX +2014-08-19,38.9,39.1,38.8,39.06,4919030,SBUX +2014-08-20,39.005,39.235,38.87,39.015,4668758,SBUX +2014-08-21,38.905,39.085,38.695,38.735,4811992,SBUX +2014-08-22,38.62,38.8,38.525,38.64,4552328,SBUX +2014-08-25,38.82,39.11,38.755,38.985,5698322,SBUX +2014-08-26,39.005,39.215,38.8775,38.895,5513856,SBUX +2014-08-27,38.955,39.135,38.83,38.96,4904018,SBUX +2014-08-28,38.895,39.015,38.755,38.905,3958032,SBUX 
+2014-08-29,39.035,39.05,38.76,38.905,4657356,SBUX +2014-09-02,38.85,38.99,38.58,38.74,6197468,SBUX +2014-09-03,38.78,38.965,38.35,38.395,6796736,SBUX +2014-09-04,38.4,38.6863,38.38,38.58,5749790,SBUX +2014-09-05,38.505,38.985,38.405,38.975,8026608,SBUX +2014-09-08,38.88,38.98,38.665,38.835,4492776,SBUX +2014-09-09,38.725,38.875,38.49,38.56,5114572,SBUX +2014-09-10,38.505,38.7,38.375,38.605,5918672,SBUX +2014-09-11,38.49,38.5,38.04,38.06,9365624,SBUX +2014-09-12,37.915,38.035,37.47,37.735,14619348,SBUX +2014-09-15,37.63,37.67,37.195,37.46,10845708,SBUX +2014-09-16,37.42,37.725,37.265,37.545,8041214,SBUX +2014-09-17,37.67,37.9105,37.5525,37.67,7475730,SBUX +2014-09-18,37.795,37.945,37.5,37.865,8028914,SBUX +2014-09-19,37.985,38.165,37.885,38.035,12040726,SBUX +2014-09-22,38.025,38.025,37.165,37.3,7859646,SBUX +2014-09-23,37.145,37.44,36.89,36.9775,7739386,SBUX +2014-09-24,37.265,37.715,37.175,37.66,8599714,SBUX +2014-09-25,37.59,37.64,37.045,37.06,8010814,SBUX +2014-09-26,37.0,37.615,36.97,37.585,7659794,SBUX +2014-09-29,37.275,37.76,37.225,37.635,6376526,SBUX +2014-09-30,37.78,37.925,37.51,37.73,7800904,SBUX +2014-10-01,37.84,37.955,37.205,37.305,8119064,SBUX +2014-10-02,37.205,37.5,36.89,37.225,8573520,SBUX +2014-10-03,37.4,38.05,37.385,37.945,8229168,SBUX +2014-10-06,38.01,38.115,37.55,37.5725,5221748,SBUX +2014-10-07,37.45,37.525,37.01,37.025,6383432,SBUX +2014-10-08,37.12,37.71,36.815,37.63,6716308,SBUX +2014-10-09,37.52,37.91,37.18,37.24,9343828,SBUX +2014-10-10,37.13,37.895,37.05,37.23,10495940,SBUX +2014-10-13,37.105,37.22,36.005,36.095,12431414,SBUX +2014-10-14,36.205,36.79,36.105,36.37,10995022,SBUX +2014-10-15,36.015,36.465,35.635,36.19,13622008,SBUX +2014-10-16,35.59,36.435,35.385,36.32,9738832,SBUX +2014-10-17,36.675,36.955,36.44,36.77,9581060,SBUX +2014-10-20,36.805,37.375,36.7025,37.35,7957028,SBUX +2014-10-21,37.5,37.5925,37.07,37.18,12312930,SBUX +2014-10-22,37.24,37.4925,37.08,37.3,6469670,SBUX +2014-10-23,37.575,37.755,37.38,37.42,6837730,SBUX +2014-10-24,37.45,37.955,37.175,37.905,7359034,SBUX +2014-10-27,38.005,38.19,37.91,37.985,5796276,SBUX +2014-10-28,38.17,38.535,37.94,38.525,8150988,SBUX +2014-10-29,38.55,38.8325,38.11,38.27,8794902,SBUX +2014-10-30,38.09,38.725,38.055,38.66,13388970,SBUX +2014-10-31,37.625,38.37,37.46,37.78,35889908,SBUX +2014-11-03,37.985,38.135,37.735,38.05,10606936,SBUX +2014-11-04,37.805,38.47,37.75,38.355,10420764,SBUX +2014-11-05,38.505,38.695,38.295,38.33,7403396,SBUX +2014-11-06,38.485,38.775,38.33,38.725,6167276,SBUX +2014-11-07,38.82,39.045,38.605,38.895,8248994,SBUX +2014-11-10,38.82,38.95,38.585,38.825,6555370,SBUX +2014-11-11,38.915,39.1,38.625,38.865,5201438,SBUX +2014-11-12,38.705,39.005,38.635,38.925,5312194,SBUX +2014-11-13,39.095,39.24,38.7059,38.945,6842248,SBUX +2014-11-14,39.04,39.15,38.865,39.06,6039282,SBUX +2014-11-17,38.97,39.235,38.8225,38.915,5899020,SBUX +2014-11-18,38.815,39.0,38.6575,38.785,5755816,SBUX +2014-11-19,38.72,39.04,38.7,38.91,5231186,SBUX +2014-11-20,38.83,39.32,38.825,39.1,6287096,SBUX +2014-11-21,39.565,39.96,39.385,39.88,14027718,SBUX +2014-11-24,39.95,40.41,39.855,40.26,9426200,SBUX +2014-11-25,40.28,40.46,40.025,40.105,8118386,SBUX +2014-11-26,40.06,40.155,39.7,39.85,6196752,SBUX +2014-11-28,40.215,40.82,40.205,40.605,6766674,SBUX +2014-12-01,40.5,40.75,40.31,40.425,8627478,SBUX +2014-12-02,40.345,40.425,40.045,40.185,7656398,SBUX +2014-12-03,40.21,40.375,40.035,40.235,11201396,SBUX +2014-12-04,40.3,41.15,40.195,40.655,14070910,SBUX +2014-12-05,41.25,41.96,41.215,41.785,14788168,SBUX 
+2014-12-08,42.1,42.1,41.55,41.9,11770652,SBUX +2014-12-09,41.535,41.645,41.1,41.515,8725620,SBUX +2014-12-10,41.45,42.02,41.275,41.33,10096320,SBUX +2014-12-11,41.45,42.01,41.3275,41.56,10058296,SBUX +2014-12-12,41.37,41.985,41.285,41.625,9164230,SBUX +2014-12-15,41.135,41.25,40.345,40.445,16286986,SBUX +2014-12-16,40.245,40.545,39.555,39.565,13981874,SBUX +2014-12-17,39.565,40.295,39.22,40.2175,10161624,SBUX +2014-12-18,40.775,40.82,39.63,40.015,17582942,SBUX +2014-12-19,40.1,40.145,39.705,39.72,17444212,SBUX +2014-12-22,39.925,40.275,39.885,40.27,7583856,SBUX +2014-12-23,40.75,41.025,40.565,40.715,8147722,SBUX +2014-12-24,40.735,40.93,40.59,40.635,2602398,SBUX +2014-12-26,40.715,41.2,40.69,40.915,4550364,SBUX +2014-12-29,40.83,41.275,40.75,41.19,4796662,SBUX +2014-12-30,41.035,41.44,40.895,40.895,5253454,SBUX +2014-12-31,41.095,41.665,41.0,41.025,7628772,SBUX +2015-01-02,41.065,41.4875,40.445,40.72,6906098,SBUX +2015-01-05,40.07,40.335,39.745,39.94,11623796,SBUX +2015-01-06,40.17,40.195,39.28,39.615,7664340,SBUX +2015-01-07,39.875,40.615,39.7,40.59,9732554,SBUX +2015-01-08,41.165,41.65,41.01,41.245,13170548,SBUX +2015-01-09,40.495,40.755,39.56,39.895,27556706,SBUX +2015-01-12,40.145,40.415,39.91,40.115,10021486,SBUX +2015-01-13,40.74,41.07,40.065,40.435,11040702,SBUX +2015-01-14,40.025,40.39,39.805,40.21,9295084,SBUX +2015-01-15,40.3,40.45,39.595,39.79,8126602,SBUX +2015-01-16,39.63,40.39,39.5,40.305,9015502,SBUX +2015-01-20,40.4,40.735,40.165,40.6125,10738304,SBUX +2015-01-21,40.525,40.815,40.265,40.645,10844182,SBUX +2015-01-22,40.68,41.42,40.445,41.37,23913056,SBUX +2015-01-23,43.25,44.35,43.22,44.11,38107194,SBUX +2015-01-26,44.045,44.25,43.705,44.06,14098574,SBUX +2015-01-27,43.89,44.605,43.725,44.17,10995808,SBUX +2015-01-28,44.35,44.795,43.745,43.7825,11963202,SBUX +2015-01-29,44.005,44.65,43.785,44.525,12475860,SBUX +2015-01-30,44.29,44.47,43.695,43.765,10070456,SBUX +2015-02-02,43.84,44.045,42.93,43.995,13638832,SBUX +2015-02-03,43.99,44.245,43.465,44.245,9252426,SBUX +2015-02-04,44.0,44.715,43.995,44.35,11496698,SBUX +2015-02-05,44.355,44.885,44.355,44.82,7598672,SBUX +2015-02-06,44.75,44.8375,44.34,44.5,7835332,SBUX +2015-02-09,44.255,44.6035,44.07,44.41,6911614,SBUX +2015-02-10,44.685,45.69,44.665,45.59,12469500,SBUX +2015-02-11,45.58,45.895,45.3525,45.395,6466910,SBUX +2015-02-12,45.545,45.97,45.045,45.9125,7197558,SBUX +2015-02-13,45.995,45.995,45.4525,45.79,6109522,SBUX +2015-02-17,45.885,46.12,45.63,46.015,6386900,SBUX +2015-02-18,46.195,46.665,46.0,46.5,6541986,SBUX +2015-02-19,46.575,46.89,46.505,46.585,6109176,SBUX +2015-02-20,46.73,46.835,46.485,46.755,6462662,SBUX +2015-02-23,46.925,46.964,46.54,46.79,5854572,SBUX +2015-02-24,46.63,46.995,46.58,46.725,6337888,SBUX +2015-02-25,46.65,47.415,46.625,47.13,8120660,SBUX +2015-02-26,47.15,47.4113,47.04,47.275,6816352,SBUX +2015-02-27,47.395,47.4,46.635,46.7425,8658404,SBUX +2015-03-02,46.665,47.275,46.665,47.1125,7947018,SBUX +2015-03-03,47.05,47.105,46.6,47.0,7578374,SBUX +2015-03-04,46.905,47.095,46.005,46.53,7774534,SBUX +2015-03-05,46.64,46.98,46.5303,46.815,5848750,SBUX +2015-03-06,46.65,46.8475,45.94,46.1075,6814414,SBUX +2015-03-09,46.195,46.565,46.045,46.52,5984880,SBUX +2015-03-10,46.08,46.4549,45.885,46.09,6076984,SBUX +2015-03-11,46.06,46.49,45.61,45.71,8185894,SBUX +2015-03-12,45.985,46.765,45.92,46.69,7295344,SBUX +2015-03-13,46.34,47.185,46.34,46.645,5835252,SBUX +2015-03-16,47.0,47.31,46.87,47.0225,6772606,SBUX +2015-03-17,46.88,47.24,46.8,47.1925,5508796,SBUX 
+2015-03-18,47.0,48.285,46.675,47.92,15429928,SBUX +2015-03-19,48.38,49.6,48.19,48.88,22020618,SBUX +2015-03-20,49.245,49.45,48.505,48.73,17360612,SBUX +2015-03-23,48.94,48.95,48.355,48.685,7985986,SBUX +2015-03-24,48.555,49.165,48.38,48.9575,7718488,SBUX +2015-03-25,49.07,49.24,47.885,47.885,9907170,SBUX +2015-03-26,47.675,47.875,46.83,47.54,10344304,SBUX +2015-03-27,47.5,47.975,47.375,47.535,7993350,SBUX +2015-03-30,48.02,48.25,47.75,47.99,6830270,SBUX +2015-03-31,47.835,48.1,47.345,47.35,8717754,SBUX +2015-04-01,47.14,47.25,46.28,46.51,14125350,SBUX +2015-04-02,46.71,47.3175,46.61,47.195,8863018,SBUX +2015-04-06,46.925,47.4,46.725,47.26,6058894,SBUX +2015-04-07,47.205,47.48,46.98,47.035,5354670,SBUX +2015-04-08,46.92,47.64,46.92,47.615,6827888,SBUX +2015-04-09,47.65,47.99,47.25,47.96,7109621,SBUX +2015-04-10,48.6,48.6,47.88,48.17,6643106,SBUX +2015-04-13,48.56,48.89,48.38,48.5,8171030,SBUX +2015-04-14,48.52,48.71,47.97,48.3,5952424,SBUX +2015-04-15,48.81,48.81,48.13,48.14,5162169,SBUX +2015-04-16,48.23,48.48,48.16,48.245,5312499,SBUX +2015-04-17,47.9,48.0,47.39,47.62,7539865,SBUX +2015-04-20,47.9,48.12,47.7,47.97,4868425,SBUX +2015-04-21,48.35,48.4799,48.02,48.37,6213360,SBUX +2015-04-22,48.5,48.6,47.98,48.335,7248119,SBUX +2015-04-23,48.55,49.7,48.28,49.43,15866051,SBUX +2015-04-24,51.32,52.09,50.62,51.84,22284881,SBUX +2015-04-27,51.81,51.94,50.76,50.87,11222608,SBUX +2015-04-28,50.6,50.8,49.9801,50.61,8882901,SBUX +2015-04-29,50.42,50.95,50.2,50.65,7161992,SBUX +2015-04-30,50.63,50.68,49.43,49.58,8492048,SBUX +2015-05-01,49.95,50.42,49.68,50.29,5916509,SBUX +2015-05-04,50.3,50.93,50.27,50.445,7493420,SBUX +2015-05-05,49.94,50.05,49.36,49.405,10691207,SBUX +2015-05-06,49.68,49.7,48.57,48.93,8033489,SBUX +2015-05-07,48.74,49.55,48.72,49.35,5681417,SBUX +2015-05-08,49.99,50.4265,49.46,49.78,6039840,SBUX +2015-05-11,49.71,50.22,49.38,49.5,5047180,SBUX +2015-05-12,49.15,49.99,49.0,49.71,5868552,SBUX +2015-05-13,49.85,50.15,49.425,49.59,4927094,SBUX +2015-05-14,49.98,50.59,49.67,50.555,7339742,SBUX +2015-05-15,50.79,50.85,50.39,50.8,6016694,SBUX +2015-05-18,50.65,51.29,50.56,51.18,8999761,SBUX +2015-05-19,51.48,51.715,50.96,51.42,6976052,SBUX +2015-05-20,51.31,51.44,50.43,51.03,5644662,SBUX +2015-05-21,50.94,51.45,50.78,51.33,5084042,SBUX +2015-05-22,51.33,51.65,51.21,51.48,5857672,SBUX +2015-05-26,51.38,51.78,50.66,50.84,7369923,SBUX +2015-05-27,51.04,51.7,50.91,51.59,6213573,SBUX +2015-05-28,51.84,51.94,51.445,51.81,5874382,SBUX +2015-05-29,51.95,52.23,51.45,51.96,9399112,SBUX +2015-06-01,51.96,52.46,51.67,52.22,7075082,SBUX +2015-06-02,51.98,52.3,51.66,51.73,7877799,SBUX +2015-06-03,52.0,52.27,51.67,52.12,5522702,SBUX +2015-06-04,51.87,52.18,51.57,51.72,6230805,SBUX +2015-06-05,51.57,52.44,51.27,52.19,7123248,SBUX +2015-06-08,52.0,52.23,51.49,51.53,6320181,SBUX +2015-06-09,51.35,51.7,51.1,51.54,5034038,SBUX +2015-06-10,51.8,52.86,51.66,52.69,8003611,SBUX +2015-06-11,52.81,53.0,52.44,52.49,6030167,SBUX +2015-06-12,52.41,52.74,52.16,52.63,5236747,SBUX +2015-06-15,52.23,52.46,52.01,52.27,5554964,SBUX +2015-06-16,52.27,53.14,52.2,52.965,6106529,SBUX +2015-06-17,53.09,53.47,52.72,53.24,6735294,SBUX +2015-06-18,53.5,54.28,53.4,54.11,10712142,SBUX +2015-06-19,54.08,54.44,53.84,53.93,10609714,SBUX +2015-06-22,54.325,54.43,53.88,53.9,7100665,SBUX +2015-06-23,54.04,54.18,53.72,54.115,5679984,SBUX +2015-06-24,53.75,53.97,53.5194,53.71,5524969,SBUX +2015-06-25,54.09,54.45,54.0,54.07,5389863,SBUX +2015-06-26,54.46,54.75,54.3,54.62,6637183,SBUX +2015-06-29,53.87,54.39,53.5144,53.55,6534077,SBUX 
+2015-06-30,54.15,54.3,53.14,53.615,9793969,SBUX +2015-07-01,53.86,54.21,53.6,53.89,6107698,SBUX +2015-07-02,54.03,54.7,53.9501,54.24,5684667,SBUX +2015-07-06,53.64,54.405,53.63,54.305,5396439,SBUX +2015-07-07,54.29,54.54,53.36,54.375,9462256,SBUX +2015-07-08,53.86,54.02,53.3101,53.39,8139303,SBUX +2015-07-09,54.07,54.5,53.88,54.05,7681013,SBUX +2015-07-10,54.54,54.732,54.18,54.57,8087800,SBUX +2015-07-13,55.0,55.89,54.9,55.7,7477170,SBUX +2015-07-14,55.96,56.06,55.52,55.75,7028025,SBUX +2015-07-15,55.92,55.95,55.285,55.34,8212573,SBUX +2015-07-16,56.06,56.16,55.66,55.74,7305520,SBUX +2015-07-17,55.9,55.9,55.37,55.69,8715431,SBUX +2015-07-20,55.73,56.74,55.7,56.21,8029646,SBUX +2015-07-21,56.38,56.47,55.78,56.2,6717452,SBUX +2015-07-22,56.43,56.87,56.25,56.69,6876059,SBUX +2015-07-23,56.98,57.0,56.16,56.56,12439229,SBUX +2015-07-24,59.12,59.31,57.15,57.29,14559687,SBUX +2015-07-27,57.21,57.47,56.85,56.98,8993523,SBUX +2015-07-28,57.39,57.4,56.56,57.14,8689215,SBUX +2015-07-29,57.38,57.8,57.14,57.51,8470512,SBUX +2015-07-30,57.3,58.15,57.01,58.06,7337948,SBUX +2015-07-31,58.44,58.44,57.73,57.93,6519528,SBUX +2015-08-03,58.62,58.96,58.0386,58.19,7664002,SBUX +2015-08-04,58.25,58.72,58.03,58.7,9113083,SBUX +2015-08-05,59.15,59.3198,58.83,59.01,7349063,SBUX +2015-08-06,59.13,59.2,57.09,57.23,11064470,SBUX +2015-08-07,57.33,57.36,56.51,57.2,7781995,SBUX +2015-08-10,57.29,57.626000000000005,55.75,56.27,12029150,SBUX +2015-08-11,55.79,56.435,55.24,56.35,8062553,SBUX +2015-08-12,55.69,56.4,54.95,56.38,10075571,SBUX +2015-08-13,56.52,57.25,56.51,56.85,6731474,SBUX +2015-08-14,56.95,57.12,56.66,57.1,4803903,SBUX +2015-08-17,57.0,57.76,56.73,57.74,5768362,SBUX +2015-08-18,57.96,58.06,57.66,57.83,5575441,SBUX +2015-08-19,57.58,58.08,57.115,57.59,6044193,SBUX +2015-08-20,57.0,57.15,55.77,55.81,7470885,SBUX +2015-08-21,54.72,54.86,52.601000000000006,52.84,20211503,SBUX +2015-08-24,48.05,52.67,42.05,50.34,27158813,SBUX +2015-08-25,52.96,53.61,51.05,51.09,19659002,SBUX +2015-08-26,52.99,54.15,51.27,53.96,15517591,SBUX +2015-08-27,54.74,56.21,54.41,55.95,15987923,SBUX +2015-08-28,55.84,56.31,55.2,55.63,7584826,SBUX +2015-08-31,55.23,55.47,54.5,54.71,7971204,SBUX +2015-09-01,52.82,54.36,52.74,53.5,13424932,SBUX +2015-09-02,54.47,55.29,53.751000000000005,55.26,9891071,SBUX +2015-09-03,55.72,55.76,54.475,54.69,7968868,SBUX +2015-09-04,53.87,54.57,53.84,54.28,6994267,SBUX +2015-09-08,55.31,55.45,54.53,55.21,8051710,SBUX +2015-09-09,55.9,56.0,54.57,54.69,8406656,SBUX +2015-09-10,54.34,55.69,54.33,55.37,8924778,SBUX +2015-09-11,55.19,56.54,55.03,56.53,8363110,SBUX +2015-09-14,56.54,56.91,56.05,56.29,5464463,SBUX +2015-09-15,56.43,57.21,56.115,56.91,6741341,SBUX +2015-09-16,56.83,57.35,56.21,57.26,6593362,SBUX +2015-09-17,57.32,58.1,57.04,57.28,7525349,SBUX +2015-09-18,56.49,57.63,56.28,56.84,16268035,SBUX +2015-09-21,57.2,57.84,56.96,57.54,6220131,SBUX +2015-09-22,56.85,57.25,56.7,57.12,8585093,SBUX +2015-09-23,57.16,57.93,57.05,57.79,6829205,SBUX +2015-09-24,57.38,58.54,57.17,58.37,10027330,SBUX +2015-09-25,58.92,58.96,57.74,57.99,10627026,SBUX +2015-09-28,58.01,58.43,55.6201,55.77,11548114,SBUX +2015-09-29,55.85,56.3,54.81,55.72,9392065,SBUX +2015-09-30,56.4,56.9,55.61,56.84,9799610,SBUX +2015-10-01,56.99,57.5,55.89,57.48,8497124,SBUX +2015-10-02,56.99,58.09,56.5406,58.08,9036765,SBUX +2015-10-05,58.49,59.18,58.07,59.04,8198998,SBUX +2015-10-06,58.82,59.14,58.22,58.69,5642949,SBUX +2015-10-07,58.62,58.83,57.9,58.78,8138313,SBUX +2015-10-08,58.78,59.71,58.39,59.46,6834836,SBUX 
+2015-10-09,59.47,60.11,59.3,60.07,7969884,SBUX +2015-10-12,60.35,60.89,60.04,60.54,6430301,SBUX +2015-10-13,60.34,60.745,60.0161,60.16,6262774,SBUX +2015-10-14,60.0,60.17,58.43,58.82,8365604,SBUX +2015-10-15,58.95,59.83,58.08,59.69,9745919,SBUX +2015-10-16,59.96,60.29,59.455,59.93,12860812,SBUX +2015-10-19,60.13,61.29,59.8745,60.97,8117213,SBUX +2015-10-20,61.22,61.36,60.56,60.88,6089097,SBUX +2015-10-21,61.07,61.12,60.16,60.53,6063923,SBUX +2015-10-22,60.96,61.7099,60.17,61.49,9182031,SBUX +2015-10-23,62.11,62.8,61.6201,62.61,8205994,SBUX +2015-10-26,62.98,63.84,62.97,63.43,9751716,SBUX +2015-10-27,63.37,63.41,62.19,62.71,8973243,SBUX +2015-10-28,63.11,63.52,62.42,63.51,9627260,SBUX +2015-10-29,63.42,63.5,61.713,62.5,14839093,SBUX +2015-10-30,63.69,64.0,62.26,62.57,16822302,SBUX +2015-11-02,63.01,63.1,62.12,62.24,8547237,SBUX +2015-11-03,62.0,62.975,61.65,62.8,8847718,SBUX +2015-11-04,63.0355,63.0355,61.34,61.96,9085091,SBUX +2015-11-05,62.17,62.46,62.01,62.28,6144979,SBUX +2015-11-06,62.05,62.24,61.61,61.97,6616305,SBUX +2015-11-09,61.75,61.97,60.86,61.34,6838326,SBUX +2015-11-10,61.54,62.32,61.21,62.18,6689040,SBUX +2015-11-11,62.55,62.57,61.81,61.87,4437315,SBUX +2015-11-12,61.34,61.65,60.75,61.07,6793779,SBUX +2015-11-13,60.89,61.345,59.61,59.74,8821593,SBUX +2015-11-16,59.5,60.69,59.5,60.68,8096603,SBUX +2015-11-17,60.95,61.56,60.435,60.55,6695251,SBUX +2015-11-18,60.66,61.865,60.33,61.8,7215255,SBUX +2015-11-19,61.8,61.93,61.39,61.46,5154366,SBUX +2015-11-20,61.96,62.15,61.58,61.99,8302476,SBUX +2015-11-23,62.14,63.19,62.1,62.64,8493485,SBUX +2015-11-24,62.06,62.37,61.2199,61.96,7908223,SBUX +2015-11-25,62.05,62.5,62.0,62.19,4549913,SBUX +2015-11-27,62.19,62.38,61.93,62.18,2447902,SBUX +2015-11-30,62.1,62.29,61.201,61.39,9863771,SBUX +2015-12-01,61.08,61.68,60.51,61.37,10910838,SBUX +2015-12-02,61.63,61.71,61.115,61.22,6587454,SBUX +2015-12-03,61.37,61.4468,59.15,59.55,12056103,SBUX +2015-12-04,59.86,61.87,59.6,61.75,9100588,SBUX +2015-12-07,61.75,61.95,61.44,61.89,5967809,SBUX +2015-12-08,61.69,62.43,61.52,62.16,6664947,SBUX +2015-12-09,61.71,62.538999999999994,60.82,61.18,8541573,SBUX +2015-12-10,61.13,62.14,61.01,61.87,6623896,SBUX +2015-12-11,60.86,61.19,59.6,59.82,11489255,SBUX +2015-12-14,60.04,60.14,58.61,59.92,13453719,SBUX +2015-12-15,60.55,60.68,59.97,59.98,7842073,SBUX +2015-12-16,60.32,60.5,59.51,60.35,9281835,SBUX +2015-12-17,60.66,60.83,59.47,59.515,9079430,SBUX +2015-12-18,59.2,59.5,58.27,58.62,18099462,SBUX +2015-12-21,58.89,59.615,58.66,59.54,7187470,SBUX +2015-12-22,59.94,60.07,59.275,59.99,6501424,SBUX +2015-12-23,60.26,60.37,59.96,60.34,4510229,SBUX +2015-12-24,60.37,60.51,60.17,60.32,2215418,SBUX +2015-12-28,60.02,60.33,59.58,60.19,4437236,SBUX +2015-12-29,60.46,61.32,60.35,61.13,5477335,SBUX +2015-12-30,61.22,61.4,60.75,60.82,3973912,SBUX +2015-12-31,60.65,60.81,60.02,60.03,4960875,SBUX +2016-01-04,58.77,58.83,57.6,58.26,13521544,SBUX +2016-01-05,58.79,58.79,57.98,58.65,9617778,SBUX +2016-01-06,57.7,58.53,57.64,58.13,8266322,SBUX +2016-01-07,56.88,57.91,56.16,56.69,11140877,SBUX +2016-01-08,57.41,57.73,56.53,56.63,10427021,SBUX +2016-01-11,57.0,58.12,56.78,57.82,10757313,SBUX +2016-01-12,58.39,59.53,58.18,59.46,12375826,SBUX +2016-01-13,59.8,60.0,57.8001,57.87,11303603,SBUX +2016-01-14,57.51,59.43,56.92100000000001,58.98,11444106,SBUX +2016-01-15,57.07,58.39,56.75,58.0,15246127,SBUX +2016-01-19,58.67,59.39,58.12,58.55,12288950,SBUX +2016-01-20,57.57,57.96,54.94,56.92,22786359,SBUX +2016-01-21,57.84,59.38,57.67,59.03,20888519,SBUX 
+2016-01-22,57.55,59.4,57.41,59.17,32820193,SBUX +2016-01-25,59.36,59.38,57.61,57.71,13554262,SBUX +2016-01-26,57.92,58.865,57.8,58.61,8898100,SBUX +2016-01-27,58.87,58.96,57.255,57.63,12491252,SBUX +2016-01-28,58.29,59.42,58.0,59.285,11832368,SBUX +2016-01-29,59.78,60.88,59.64,60.77,13224438,SBUX +2016-02-01,60.66,61.785,60.27,61.4,9529094,SBUX +2016-02-02,60.66,60.9,60.18,60.695,9407352,SBUX +2016-02-03,60.88,61.13,58.5,59.53,12254460,SBUX +2016-02-04,59.41,59.4487,57.99,58.29,13944926,SBUX +2016-02-05,58.1,58.2,54.25,54.49,24529008,SBUX +2016-02-08,53.09,54.47,52.63,54.14,21457492,SBUX +2016-02-09,53.19,55.2886,53.17,54.42,11605059,SBUX +2016-02-10,55.28,56.35,55.01,55.14,11663942,SBUX +2016-02-11,53.89,55.39,53.55,54.92,12106062,SBUX +2016-02-12,55.56,56.04,55.04,55.86,8680205,SBUX +2016-02-16,56.79,56.85,55.98,56.41,11594766,SBUX +2016-02-17,56.7,57.66,56.16,57.63,11955188,SBUX +2016-02-18,57.57,57.57,56.67,56.96,8493953,SBUX +2016-02-19,56.92,57.86,56.52,57.67,9033620,SBUX +2016-02-22,58.63,58.95,58.17,58.87,8390689,SBUX +2016-02-23,58.45,58.9,58.0,58.46,7064095,SBUX +2016-02-24,57.21,58.35,56.28,58.11,10780882,SBUX +2016-02-25,58.46,58.75,58.0,58.75,6262127,SBUX +2016-02-26,59.0,59.21,57.92,58.34,7473374,SBUX +2016-02-29,58.25,59.15,58.1,58.21,7645081,SBUX +2016-03-01,58.77,60.2,58.5,60.04,9183562,SBUX +2016-03-02,59.83,60.0,58.83,59.56,8856392,SBUX +2016-03-03,59.12,59.2,58.2,59.04,8262455,SBUX +2016-03-04,59.14,59.19,58.23,58.7,8344773,SBUX +2016-03-07,58.44,58.67,57.31,58.0,9204624,SBUX +2016-03-08,57.58,58.23,57.26,57.6,8127426,SBUX +2016-03-09,57.78,57.97,56.79,57.07,9734589,SBUX +2016-03-10,57.51,57.86,56.92,57.52,7023785,SBUX +2016-03-11,58.1,58.1,56.57,57.59,15497560,SBUX +2016-03-14,57.58,58.78,57.5,58.65,9053250,SBUX +2016-03-15,58.32,59.1566,58.17,59.08,7428469,SBUX +2016-03-16,58.65,59.82,58.65,59.67,8069427,SBUX +2016-03-17,59.47,59.98,59.37,59.55,7734658,SBUX +2016-03-18,59.91,60.45,59.4295,59.7,14313578,SBUX +2016-03-21,59.56,59.8609,59.015,59.1,6487185,SBUX +2016-03-22,59.0,59.55,58.57,59.38,8246837,SBUX +2016-03-23,59.14,59.395,58.69,58.83,5794495,SBUX +2016-03-24,58.7,58.79,58.28,58.36,5948307,SBUX +2016-03-28,58.56,59.47,58.4,58.96,5791603,SBUX +2016-03-29,58.82,59.735,58.82,59.55,6031947,SBUX +2016-03-30,60.0,60.26,59.51,60.01,5723499,SBUX +2016-03-31,59.77,60.21,59.68,59.7,5622834,SBUX +2016-04-01,59.61,61.17,59.41,61.02,9401126,SBUX +2016-04-04,61.1,61.1839,60.08,60.25,5799864,SBUX +2016-04-05,59.88,60.23,59.44,60.04,4994792,SBUX +2016-04-06,60.02,60.91,59.91,60.83,5667253,SBUX +2016-04-07,60.59,61.54,60.54,61.17,8239174,SBUX +2016-04-08,61.5,61.64,60.7,61.04,5064894,SBUX +2016-04-11,61.22,61.5,60.78,60.9,6103358,SBUX +2016-04-12,58.95,59.68,58.37,59.5,17565750,SBUX +2016-04-13,60.32,61.08,59.75,60.21,9898527,SBUX +2016-04-14,60.26,60.4,59.91,60.13,5157368,SBUX +2016-04-15,60.24,60.6246,60.01,60.51,5965310,SBUX +2016-04-18,60.69,61.07,60.355,60.89,7228573,SBUX +2016-04-19,61.16,61.25,60.48,60.9,7283570,SBUX +2016-04-20,61.04,61.43,60.85,60.9,5558770,SBUX +2016-04-21,60.9,61.1,60.48,60.64,12799083,SBUX +2016-04-22,59.01,59.1,57.03,57.68,29836693,SBUX +2016-04-25,57.62,57.96,57.58,57.77,8428038,SBUX +2016-04-26,58.05,58.67,57.56,57.72,8839067,SBUX +2016-04-27,57.51,57.65,56.62,56.9,12390767,SBUX +2016-04-28,56.59,57.36,56.32,56.42,9196560,SBUX +2016-04-29,56.02,56.43,55.29,56.23,12133364,SBUX +2016-05-02,56.29,57.37,56.11,57.36,8616189,SBUX +2016-05-03,56.7,57.059,56.14,56.25,7854260,SBUX +2016-05-04,55.98,56.65,55.8,56.39,6508507,SBUX 
+2016-05-05,56.37,56.77,56.01,56.25,6215367,SBUX +2016-05-06,55.96,56.32,55.38,56.31,6378690,SBUX +2016-05-09,56.32,56.93,56.23,56.64,5976229,SBUX +2016-05-10,56.85,57.6,56.71,57.49,7931185,SBUX +2016-05-11,57.13,57.39,56.09,56.23,8118920,SBUX +2016-05-12,56.57,56.79,55.82,56.3,6870281,SBUX +2016-05-13,56.43,56.6,55.73,55.82,5466041,SBUX +2016-05-16,55.7,55.77,55.2,55.53,9536192,SBUX +2016-05-17,55.38,55.64,54.51,54.88,10530018,SBUX +2016-05-18,54.76,55.17,54.38,54.8,7468496,SBUX +2016-05-19,54.43,54.615,54.19,54.55,7582848,SBUX +2016-05-20,54.88,55.3736,54.58,54.62,8430783,SBUX +2016-05-23,54.62,54.8171,54.291000000000004,54.6,7352054,SBUX +2016-05-24,54.74,55.62,54.68,55.44,7748697,SBUX +2016-05-25,55.2,55.46,54.95,55.15,8126058,SBUX +2016-05-26,55.55,55.8699,54.95,55.29,9451708,SBUX +2016-05-27,55.36,55.55,55.1,55.15,6631120,SBUX +2016-05-31,55.5,55.5,54.7,54.89,12043976,SBUX +2016-06-01,54.76,55.49,54.72,54.82,8761577,SBUX +2016-06-02,54.9,55.0,54.455,54.62,8307488,SBUX +2016-06-03,54.71,55.08,54.4,54.61,6649224,SBUX +2016-06-06,54.72,55.87,54.69,55.59,9900917,SBUX +2016-06-07,55.65,56.1,55.28,55.3,7173072,SBUX +2016-06-08,55.5,55.5,54.9,55.22,7623851,SBUX +2016-06-09,55.15,55.61,55.06,55.58,5927854,SBUX +2016-06-10,54.92,55.2,54.5,54.865,8118651,SBUX +2016-06-13,54.79,55.6,54.76,55.04,7928722,SBUX +2016-06-14,55.05,55.58,55.0101,55.57,8036517,SBUX +2016-06-15,55.64,56.09,55.27,55.35,7447317,SBUX +2016-06-16,54.9,55.59,54.41,55.53,7968033,SBUX +2016-06-17,55.61,55.62,55.04,55.31,9503017,SBUX +2016-06-20,55.77,56.28,55.38,55.38,7286681,SBUX +2016-06-21,55.52,56.03,55.45,55.81,7445109,SBUX +2016-06-22,55.88,55.98,55.49,55.61,7215411,SBUX +2016-06-23,55.98,56.195,55.9,56.13,5569431,SBUX +2016-06-24,54.05,55.57,54.01,54.68,14654672,SBUX +2016-06-27,54.2,54.48,53.41,53.69,11650798,SBUX +2016-06-28,54.1,54.9,53.95,54.85,8416950,SBUX +2016-06-29,55.42,56.945,55.36,56.74,11103787,SBUX +2016-06-30,56.81,57.19,56.516000000000005,57.12,10215193,SBUX +2016-07-01,57.04,57.36,56.845,56.99,8330308,SBUX +2016-07-05,56.81,56.96,56.55,56.77,7274208,SBUX +2016-07-06,56.52,57.11,56.32,56.75,8474188,SBUX +2016-07-07,56.66,57.0,56.47,56.91,6813347,SBUX +2016-07-08,56.92,57.0,56.36,56.51,12655500,SBUX +2016-07-11,56.8,56.92,56.06,56.32,10394366,SBUX +2016-07-12,56.65,57.6,56.505,57.48,10998486,SBUX +2016-07-13,56.8,57.26,56.35,56.48,12183638,SBUX +2016-07-14,57.0,57.68,56.97,57.59,11353599,SBUX +2016-07-15,57.69,57.74,57.125,57.41,8494000,SBUX +2016-07-18,57.59,57.59,56.85,56.92,7614635,SBUX +2016-07-19,56.85,57.135,56.545,56.76,8719772,SBUX +2016-07-20,57.0,57.66,56.705,57.54,9446396,SBUX +2016-07-21,57.62,57.67,57.0375,57.6,16151699,SBUX +2016-07-22,57.6,58.24,57.2,57.9,23899275,SBUX +2016-07-25,57.72,58.09,57.5,57.95,10486019,SBUX +2016-07-26,58.58,58.84,58.2,58.31,10106663,SBUX +2016-07-27,58.38,58.4,57.67,57.85,6551777,SBUX +2016-07-28,57.88,58.31,57.74,58.21,6830059,SBUX +2016-07-29,58.18,58.43,57.92,58.05,6914907,SBUX +2016-08-01,58.0,58.05,57.43,57.63,7997576,SBUX +2016-08-02,57.25,57.34,56.54,56.73,7574622,SBUX +2016-08-03,56.46,56.59,55.72,55.94,11484792,SBUX +2016-08-04,56.05,56.29,55.38,55.42,11193748,SBUX +2016-08-05,55.8,56.12,55.52,55.9,9206197,SBUX +2016-08-08,55.97,55.99,55.17,55.36,9129307,SBUX +2016-08-09,55.39,55.71,55.18,55.2,7136683,SBUX +2016-08-10,55.37,55.71,55.11,55.62,6991077,SBUX +2016-08-11,55.75,55.96,55.46,55.47,6191412,SBUX +2016-08-12,55.27,55.745,55.23,55.47,5039757,SBUX +2016-08-15,55.65,55.7,55.18,55.25,5968728,SBUX +2016-08-16,55.25,55.57,54.92,55.37,5751178,SBUX 
+2016-08-17,55.77,55.92,55.43,55.8,7410631,SBUX +2016-08-18,55.78,55.9,55.49,55.53,5390990,SBUX +2016-08-19,55.46,55.56,54.85,54.94,8981214,SBUX +2016-08-22,54.98,55.92,54.95,55.85,8837808,SBUX +2016-08-23,56.17,56.54,56.0,56.4,7827873,SBUX +2016-08-24,57.0,57.98,56.95,57.09,13200460,SBUX +2016-08-25,57.04,57.45,56.9,57.29,6686589,SBUX +2016-08-26,57.48,57.83,56.995,57.29,6940511,SBUX +2016-08-29,57.22,57.48,56.61,56.8,7026700,SBUX +2016-08-30,56.66,56.75,56.01,56.4,6377668,SBUX +2016-08-31,56.31,56.42,55.905,56.23,6996894,SBUX +2016-09-01,56.3,56.56,55.83,56.31,6230148,SBUX +2016-09-02,56.52,56.65,55.985,56.18,7441463,SBUX +2016-09-06,56.18,56.42,55.69,56.02,6472907,SBUX +2016-09-07,56.19,56.6,56.12,56.32,11428644,SBUX +2016-09-08,56.1,56.15,55.2,55.3,12673626,SBUX +2016-09-09,55.14,55.2,54.3,54.35,10658120,SBUX +2016-09-12,53.92,54.79,53.92,54.71,11002544,SBUX +2016-09-13,54.39,54.55,53.75,53.98,10050215,SBUX +2016-09-14,54.26,54.35,53.8,53.9,6707850,SBUX +2016-09-15,53.96,54.13,53.54,54.11,8080426,SBUX +2016-09-16,53.94,54.09,53.41,53.74,10207750,SBUX +2016-09-19,53.96,53.9739,52.9,53.01,9231628,SBUX +2016-09-20,53.4,53.435,53.05,53.3,8731499,SBUX +2016-09-21,53.43,54.03,53.28,53.98,9213637,SBUX +2016-09-22,54.2,54.6,53.965,54.39,9096709,SBUX +2016-09-23,54.0,54.56,54.0,54.43,6945147,SBUX +2016-09-26,54.28,54.34,53.86,54.04,7755629,SBUX +2016-09-27,54.0,54.21,53.82,54.19,6463747,SBUX +2016-09-28,53.88,54.08,53.53,53.98,8593496,SBUX +2016-09-29,53.88,53.995,52.91,53.45,11993517,SBUX +2016-09-30,53.65,54.385,53.56,54.14,13767754,SBUX +2016-10-03,54.1,54.15,53.665,53.84,5479037,SBUX +2016-10-04,54.13,54.13,53.41,53.53,6176358,SBUX +2016-10-05,53.5,53.76,53.275,53.35,7451916,SBUX +2016-10-06,53.3,53.48,53.03,53.14,6130270,SBUX +2016-10-07,53.37,53.605,53.0,53.46,7279723,SBUX +2016-10-10,53.53,53.6,53.27,53.3,7224335,SBUX +2016-10-11,53.13,53.4,52.74,52.92,9720407,SBUX +2016-10-12,53.01,53.47,52.78,53.16,6320462,SBUX +2016-10-13,52.88,53.13,52.6662,52.95,6958128,SBUX +2016-10-14,53.12,53.37,52.96,53.08,6430136,SBUX +2016-10-17,52.94,53.145,52.69,52.76,5223526,SBUX +2016-10-18,53.24,53.3197,52.59,52.61,6550399,SBUX +2016-10-19,52.91,53.74,52.9,53.15,9095261,SBUX +2016-10-20,53.36,53.74,52.91,53.59,9286800,SBUX +2016-10-21,53.42,53.7,53.25,53.63,6767204,SBUX +2016-10-24,53.9,54.46,53.8939,54.18,6919714,SBUX +2016-10-25,54.1,54.17,53.5,53.67,6052830,SBUX +2016-10-26,53.6,53.84,53.355,53.63,5817798,SBUX +2016-10-27,53.6,53.83,53.13,53.59,7899957,SBUX +2016-10-28,53.65,53.84,53.11,53.53,6620333,SBUX +2016-10-31,53.7,53.7,53.055,53.07,9142509,SBUX +2016-11-01,53.14,53.21,52.085,52.5,15425819,SBUX +2016-11-02,52.34,53.46,52.31,52.98,10851658,SBUX +2016-11-03,52.99,53.0,51.34,51.77,21847292,SBUX +2016-11-04,51.43,53.74,50.84,52.75,21956848,SBUX +2016-11-07,53.5,54.68,53.19,54.49,14916848,SBUX +2016-11-08,54.4,54.79,54.115,54.62,9351994,SBUX +2016-11-09,53.2,54.82,52.8,54.58,13727777,SBUX +2016-11-10,54.64,54.817,53.51,53.57,13621701,SBUX +2016-11-11,53.43,53.99,53.25,53.93,8436435,SBUX +2016-11-14,53.93,54.47,53.5,54.22,10489826,SBUX +2016-11-15,54.09,54.69,53.9,54.59,9588036,SBUX +2016-11-16,54.33,55.52,54.26,55.44,10779155,SBUX +2016-11-17,55.215,55.9,55.06,55.85,8744504,SBUX +2016-11-18,55.72,56.12,55.42,55.77,8740953,SBUX +2016-11-21,55.51,56.16,55.51,56.1,8004000,SBUX +2016-11-22,56.32,57.15,55.88,57.12,10268720,SBUX +2016-11-23,56.91,57.64,56.9,57.59,8183628,SBUX +2016-11-25,57.7,57.7,57.255,57.43,3228848,SBUX +2016-11-28,57.0,57.86,56.76,57.59,8750925,SBUX 
+2016-11-29,57.64,58.21,57.5,58.17,10582850,SBUX +2016-11-30,58.19,58.25,57.86,57.97,9527959,SBUX +2016-12-01,57.34,58.52,57.2,58.51,12381607,SBUX +2016-12-02,56.648999999999994,57.75,56.57,57.21,16869957,SBUX +2016-12-05,56.96,57.84,56.96,57.5,7701167,SBUX +2016-12-06,57.66,57.7,57.14,57.44,7035674,SBUX +2016-12-07,57.54,58.85,57.45,58.76,9094812,SBUX +2016-12-08,59.0,59.25,58.4118,58.65,7972498,SBUX +2016-12-09,58.92,58.95,58.43,58.75,7091577,SBUX +2016-12-12,58.54,58.79,58.34,58.77,7736198,SBUX +2016-12-13,58.99,59.54,58.66,59.31,8878080,SBUX +2016-12-14,59.03,59.25,58.6,58.75,9022867,SBUX +2016-12-15,57.9675,58.21,57.52,57.71,11837756,SBUX +2016-12-16,58.01,58.07,57.56,57.66,10611461,SBUX +2016-12-19,57.44,57.98,57.44,57.65,6433824,SBUX +2016-12-20,57.81,58.06,57.32,57.7,4888284,SBUX +2016-12-21,57.5,57.87,57.41,57.44,5380537,SBUX +2016-12-22,57.31,57.4,56.72,57.11,6777656,SBUX +2016-12-23,57.29,57.36,56.89,57.01,4298476,SBUX +2016-12-27,56.99,57.3869,56.81,56.86,4186157,SBUX +2016-12-28,56.8,56.9,56.25,56.35,5548726,SBUX +2016-12-29,56.35,56.47,56.135,56.32,3781721,SBUX +2016-12-30,56.28,56.45,55.4,55.52,8344508,SBUX +2017-01-03,55.91,55.95,55.04,55.35,7809307,SBUX +2017-01-04,55.56,56.195,55.38,55.99,7796290,SBUX +2017-01-05,56.08,56.53,55.8099,56.46,7602321,SBUX +2017-01-06,56.63,57.27,56.08,57.13,8587812,SBUX +2017-01-09,57.26,58.335,57.25,58.2,12640515,SBUX +2017-01-10,58.22,58.26,57.83,57.88,6672024,SBUX +2017-01-11,57.8,58.12,57.59,58.1,6027960,SBUX +2017-01-12,58.0,58.13,57.64,58.03,4733015,SBUX +2017-01-13,58.03,58.1,57.65,57.85,4745840,SBUX +2017-01-17,57.62,58.25,57.41,58.0,5734666,SBUX +2017-01-18,58.32,58.58,58.03,58.45,7375725,SBUX +2017-01-19,58.31,58.45,57.715,57.89,7850480,SBUX +2017-01-20,58.14,58.2,57.41,57.66,7651562,SBUX +2017-01-23,57.42,57.9,57.15,57.76,6814368,SBUX +2017-01-24,57.93,58.5,57.76,58.44,10704103,SBUX +2017-01-25,58.67,58.93,58.45,58.7,7124547,SBUX +2017-01-26,58.7,59.0,58.26,58.46,12382416,SBUX +2017-01-27,55.75,56.59,55.65,56.12,28884899,SBUX +2017-01-30,56.0,56.24,55.58,55.9,13322010,SBUX +2017-01-31,55.8,55.87,54.88,55.22,14307985,SBUX +2017-02-01,55.49,55.5,53.81,53.9,18796871,SBUX +2017-02-02,54.04,54.39,53.85,53.87,15289650,SBUX +2017-02-03,54.21,55.1,54.01,55.06,14161693,SBUX +2017-02-06,55.01,55.75,54.9,55.73,13029829,SBUX +2017-02-07,55.79,55.84,55.1801,55.24,9910498,SBUX +2017-02-08,55.19,55.4957,55.1,55.22,11681938,SBUX +2017-02-09,55.23,56.12,55.21,55.81,11106757,SBUX +2017-02-10,55.73,56.395,55.56,56.22,11178950,SBUX +2017-02-13,56.5,56.66,56.03,56.11,8027939,SBUX +2017-02-14,56.02,56.61,56.02,56.58,8865947,SBUX +2017-02-15,56.56,56.88,56.305,56.86,6967179,SBUX +2017-02-16,56.96,56.99,56.53,56.73,8524519,SBUX +2017-02-17,56.8,57.57,56.71,57.35,11008366,SBUX +2017-02-21,57.41,57.81,57.4,57.54,8289185,SBUX +2017-02-22,57.52,57.85,57.35,57.57,7876599,SBUX +2017-02-23,57.6,57.79,57.39,57.64,7178627,SBUX +2017-02-24,57.61,57.71,57.145,57.48,7806190,SBUX +2017-02-27,57.24,57.3,56.66,56.78,7702400,SBUX +2017-02-28,56.71,57.06,56.55,56.87,8750655,SBUX +2017-03-01,57.27,57.4,56.94,57.14,7197973,SBUX +2017-03-02,57.07,57.19,56.85,57.12,6595418,SBUX +2017-03-03,56.7,57.26,56.7,57.1,7738064,SBUX +2017-03-06,56.78,56.81,56.33,56.68,9159983,SBUX +2017-03-07,56.58,56.75,56.02,56.2,10890313,SBUX +2017-03-08,56.15,56.35,55.54,55.74,13061632,SBUX +2017-03-09,55.75,55.8,54.81,55.19,17844248,SBUX +2017-03-10,55.39,55.4,54.415,54.53,13886431,SBUX +2017-03-13,54.57,54.8847,54.4,54.63,9090230,SBUX +2017-03-14,54.62,54.74,54.19,54.27,7892888,SBUX 
+2017-03-15,54.39,54.65,54.09,54.54,8712006,SBUX +2017-03-16,54.85,54.985,54.66,54.8,8074278,SBUX +2017-03-17,55.04,56.13,54.95,55.78,15822141,SBUX +2017-03-20,55.87,56.05,55.51,55.81,7948425,SBUX +2017-03-21,56.05,56.45,55.485,55.54,8030642,SBUX +2017-03-22,55.68,56.04,55.51,55.89,8452070,SBUX +2017-03-23,56.15,56.425,55.78,55.85,7357207,SBUX +2017-03-24,56.11,57.38,55.9,56.81,15763000,SBUX +2017-03-27,56.66,57.45,56.46,57.23,8861799,SBUX +2017-03-28,57.1,57.52,57.025,57.35,8331644,SBUX +2017-03-29,57.17,57.85,57.13,57.54,7001635,SBUX +2017-03-30,57.45,58.3,57.42,58.16,8677916,SBUX +2017-03-31,58.105,58.66,58.06,58.39,9156707,SBUX +2017-04-03,58.28,58.47,57.89,58.44,8989831,SBUX +2017-04-04,58.37,58.41,58.01,58.32,6474781,SBUX +2017-04-05,57.96,59.27,57.81,58.22,13656221,SBUX +2017-04-06,58.13,58.36,57.73,57.92,9112533,SBUX +2017-04-07,57.8,58.25,57.55,58.02,6773750,SBUX +2017-04-10,58.0,58.21,57.73,57.95,5266312,SBUX +2017-04-11,57.74,58.16,57.38,57.88,5855091,SBUX +2017-04-12,57.88,57.895,57.48,57.58,5450298,SBUX +2017-04-13,57.61,57.79,57.45,57.51,4871717,SBUX +2017-04-17,57.68,58.25,57.68,58.08,5626042,SBUX +2017-04-18,57.59,58.48,57.59,58.35,5449699,SBUX +2017-04-19,58.6,59.49,58.5,59.04,12000639,SBUX +2017-04-20,59.71,60.335,59.56,60.08,12445862,SBUX +2017-04-21,60.2,60.69,60.15,60.61,8799985,SBUX +2017-04-24,61.0,61.38,60.89,61.11,10721276,SBUX +2017-04-25,60.65,61.21,59.9237,60.96,11031475,SBUX +2017-04-26,61.23,61.75,60.98,61.56,8525419,SBUX +2017-04-27,61.63,61.94,61.19,61.3,15285342,SBUX +2017-04-28,59.41,60.18,58.99,60.06,25046130,SBUX +2017-05-01,60.0,60.6,59.7619,60.18,10910341,SBUX +2017-05-02,60.15,60.52,60.04,60.5,9152088,SBUX +2017-05-03,60.52,60.665,60.255,60.59,7706367,SBUX +2017-05-04,60.65,60.85,60.39,60.83,6874286,SBUX +2017-05-05,60.86,60.99,60.58,60.95,6443309,SBUX +2017-05-08,61.07,61.07,60.7,60.94,5588371,SBUX +2017-05-09,60.58,61.08,60.57,60.98,5806562,SBUX +2017-05-10,60.88,60.98,60.21,60.66,7198414,SBUX +2017-05-11,60.45,60.51,60.03,60.27,5516893,SBUX +2017-05-12,60.28,60.34,59.83,59.93,5647531,SBUX +2017-05-15,60.42,60.49,60.125,60.45,5904094,SBUX +2017-05-16,60.68,60.7,59.88,59.98,6303480,SBUX +2017-05-17,59.97,60.3239,59.55,59.73,7581230,SBUX +2017-05-18,59.73,59.94,58.87,59.82,8602411,SBUX +2017-05-19,59.94,61.92,59.94,61.36,12530995,SBUX +2017-05-22,61.03,61.72,61.03,61.23,6392124,SBUX +2017-05-23,61.4,61.5,60.86,61.15,5622524,SBUX +2017-05-24,61.4,62.0,60.94,61.89,7283789,SBUX +2017-05-25,62.01,63.11,61.9,62.9,8931804,SBUX +2017-05-26,63.01,63.42,62.97,63.3,6097990,SBUX +2017-05-30,63.07,63.41,63.01,63.26,7094586,SBUX +2017-05-31,63.27,63.61,63.08,63.61,7314634,SBUX +2017-06-01,63.51,63.82,63.35,63.75,6058263,SBUX +2017-06-02,63.88,64.68,63.7,64.57,7840374,SBUX +2017-06-05,64.85,64.87,64.18,64.27,6809284,SBUX +2017-06-06,64.22,64.35,64.05,64.16,5448439,SBUX +2017-06-07,64.13,64.295,63.34,63.5,8364994,SBUX +2017-06-08,63.44,63.58,62.02,62.24,11289266,SBUX +2017-06-09,62.37,62.48,61.8745,62.19,11240487,SBUX +2017-06-12,61.8,61.99,60.63,61.29,11071593,SBUX +2017-06-13,61.12,61.255,60.59,60.92,9384906,SBUX +2017-06-14,60.67,60.82,59.86,60.27,9703332,SBUX +2017-06-15,59.92,60.28,59.51,60.09,7515980,SBUX +2017-06-16,59.89,60.16,59.47,60.14,11522438,SBUX +2017-06-19,60.35,61.0,60.11,60.9,6778024,SBUX +2017-06-20,60.98,61.0,59.7,59.86,6985666,SBUX +2017-06-21,60.0,60.31,59.71,59.96,6027647,SBUX +2017-06-22,60.09,60.1,59.4,59.51,5602002,SBUX +2017-06-23,59.76,60.17,59.58,59.81,6469495,SBUX +2017-06-26,60.02,60.15,59.33,59.64,5674637,SBUX 
+2017-06-27,59.54,59.69,58.81,58.96,5652429,SBUX +2017-06-28,59.06,59.25,58.8,59.18,5419169,SBUX +2017-06-29,59.17,59.18,57.955,58.36,7421177,SBUX +2017-06-30,58.68,58.95,58.29,58.31,8117066,SBUX +2017-07-03,58.9,58.99,58.25,58.25,4575268,SBUX +2017-07-05,58.43,58.5,57.8,57.94,7773566,SBUX +2017-07-06,57.8,57.92,57.4,57.6,8886648,SBUX +2017-07-07,57.79,58.36,57.54,58.04,7278250,SBUX +2017-07-10,58.18,58.35,57.75,57.81,4832094,SBUX +2017-07-11,57.91,58.08,57.53,57.9,5422330,SBUX +2017-07-12,58.21,58.71,58.02,58.54,7141916,SBUX +2017-07-13,58.8,58.87,58.12,58.38,8460245,SBUX +2017-07-14,58.4,58.92,58.28,58.76,5441377,SBUX +2017-07-17,58.73,58.87,58.28,58.33,6774471,SBUX +2017-07-18,58.12,58.58,57.69,58.21,7857464,SBUX +2017-07-19,58.15,58.41,57.9,58.11,8203557,SBUX +2017-07-20,58.41,58.84,58.0,58.03,10546701,SBUX +2017-07-21,57.92,58.26,57.83,57.98,6717235,SBUX +2017-07-24,58.0,58.25,57.93,58.02,7442589,SBUX +2017-07-25,58.215,58.84,57.98,58.55,7933137,SBUX +2017-07-26,58.8,58.84,57.7847,57.94,8775889,SBUX +2017-07-27,58.25,59.66,57.93,59.5,23286716,SBUX +2017-07-28,55.23,55.96,53.41,54.0,53454789,SBUX +2017-07-31,54.48,54.68,53.95,53.98,20299407,SBUX +2017-08-01,54.57,54.79,53.97,54.73,18120912,SBUX +2017-08-02,54.75,55.45,54.6702,55.43,14764854,SBUX +2017-08-03,55.64,56.12,55.5,55.68,13331459,SBUX +2017-08-04,55.97,56.05,55.09,55.44,9179779,SBUX +2017-08-07,55.6,55.93,55.42,55.63,7253947,SBUX +2017-08-08,55.55,55.58,54.36,54.52,11095259,SBUX +2017-08-09,54.43,54.43,53.3,53.74,16717719,SBUX +2017-08-10,53.52,53.7453,52.99,53.07,13235301,SBUX +2017-08-11,53.05,53.4,53.05,53.18,9235033,SBUX +2017-08-14,53.6,53.6,53.18,53.22,7426467,SBUX +2017-08-15,53.41,53.42,52.89,53.15,6674597,SBUX +2017-08-16,53.26,53.92,53.25,53.5,7667081,SBUX +2017-08-17,53.32,53.78,52.99,53.04,7451679,SBUX +2017-08-18,52.92,53.085,52.58,52.7,10370499,SBUX +2017-08-21,53.14,53.52,52.8,53.15,12753196,SBUX +2017-08-22,53.49,54.74,53.39,54.45,14547613,SBUX +2017-08-23,53.96,54.1,53.65,54.08,11838533,SBUX +2017-08-24,54.26,54.445,53.77,53.94,8716699,SBUX +2017-08-25,54.62,54.69,54.19,54.36,8601120,SBUX +2017-08-28,54.54,54.58,54.02,54.4,6643652,SBUX +2017-08-29,54.04,54.29,54.0,54.1,6212403,SBUX +2017-08-30,54.04,54.62,54.04,54.52,5596567,SBUX +2017-08-31,54.68,54.99,54.57,54.86,8245287,SBUX +2017-09-01,54.9,55.155,54.88,54.93,7696302,SBUX +2017-09-05,54.95,55.2,54.6,55.13,9178048,SBUX +2017-09-06,55.12,55.275,54.13,54.31,11372298,SBUX +2017-09-07,54.42,54.675,53.25,53.47,13801890,SBUX +2017-09-08,53.39,53.78,53.05,53.49,11774483,SBUX +2017-09-11,53.79,54.13,53.61,54.02,9314079,SBUX +2017-09-12,53.98,54.05,53.33,53.54,10339446,SBUX +2017-09-13,53.55,54.72,53.4866,54.29,12787883,SBUX +2017-09-14,54.38,54.71,54.1,54.53,8260770,SBUX +2017-09-15,54.44,54.79,54.16,54.67,10744783,SBUX +2017-09-18,54.8,54.97,54.57,54.69,6348360,SBUX +2017-09-19,54.84,54.88,54.56,54.62,5187121,SBUX +2017-09-20,54.58,55.18,54.58,55.15,7080035,SBUX +2017-09-21,55.05,55.17,54.86,55.01,7178462,SBUX +2017-09-22,55.05,55.2,54.86,55.09,6950029,SBUX +2017-09-25,54.98,55.22,54.7,54.95,10242655,SBUX +2017-09-26,55.06,55.22,54.795,55.13,8021851,SBUX +2017-09-27,55.25,55.6,54.96,54.99,8671309,SBUX +2017-09-28,54.81,55.08,54.45,54.5,7607473,SBUX +2017-09-29,54.31,54.47,53.36,53.71,11944370,SBUX +2017-10-02,53.86,54.04,53.75,53.81,5955980,SBUX +2017-10-03,54.0,54.34,53.9,53.99,6137069,SBUX +2017-10-04,54.12,54.2,53.69,53.93,5569058,SBUX +2017-10-05,54.06,54.97,53.92,54.6,7994274,SBUX +2017-10-06,54.51,55.45,54.245,55.17,10576278,SBUX 
+2017-10-09,55.37,55.49,54.96,55.02,5822893,SBUX +2017-10-10,55.1,55.8492,55.08,55.42,6573918,SBUX +2017-10-11,55.46,55.8,55.26,55.64,8881897,SBUX +2017-10-12,55.67,56.27,55.309,55.97,7234267,SBUX +2017-10-13,56.0,56.43,55.61,55.72,6231132,SBUX +2017-10-16,55.67,55.8,54.89,54.91,7256893,SBUX +2017-10-17,54.86,55.23,54.29,54.51,11227337,SBUX +2017-10-18,54.46,55.43,54.22,55.21,8299509,SBUX +2017-10-19,55.08,55.5423,54.9,55.4,5720179,SBUX +2017-10-20,55.31,55.4,54.175,54.57,11741092,SBUX +2017-10-23,54.77,54.935,54.18,54.27,10111071,SBUX +2017-10-24,54.37,54.56,54.02,54.28,7818490,SBUX +2017-10-25,54.24,54.39,53.66,54.16,8281219,SBUX +2017-10-26,54.5,55.75,54.5,54.91,12211440,SBUX +2017-10-27,54.83,55.12,54.68,54.88,9922611,SBUX +2017-10-30,54.79,55.23,54.4,55.17,6430949,SBUX +2017-10-31,55.16,55.305,54.7,54.84,8857829,SBUX +2017-11-01,55.1,55.59,54.94,55.13,7189764,SBUX +2017-11-02,55.15,55.39,54.77,54.87,16879022,SBUX +2017-11-03,54.16,56.94,54.05,56.03,28773774,SBUX +2017-11-06,55.99,56.69,55.63,56.57,10835659,SBUX +2017-11-07,56.33,57.29,56.33,57.22,11167447,SBUX +2017-11-08,57.03,58.01,57.0,57.91,13533654,SBUX +2017-11-09,57.2,57.66,56.785,57.36,14758907,SBUX +2017-11-10,57.36,57.39,56.54,57.04,7930318,SBUX +2017-11-13,56.81,57.14,56.55,56.64,7648192,SBUX +2017-11-14,56.47,57.0,56.41,56.93,7758215,SBUX +2017-11-15,56.82,57.06,56.51,56.7,8880505,SBUX +2017-11-16,56.92,57.42,56.75,57.24,8310611,SBUX +2017-11-17,57.24,57.43,56.84,56.93,6311519,SBUX +2017-11-20,56.67,57.05,56.58,56.81,6360086,SBUX +2017-11-21,56.96,57.56,56.88,57.26,6284259,SBUX +2017-11-22,57.07,57.18,56.795,57.14,7309073,SBUX +2017-11-24,57.19,57.19,56.71,56.8,3479177,SBUX +2017-11-27,56.83,56.9207,55.751000000000005,55.91,10580296,SBUX +2017-11-28,56.03,56.72,55.9101,56.66,9734431,SBUX +2017-11-29,56.71,57.68,56.7,57.51,10017914,SBUX +2017-11-30,57.64,58.1399,57.47,57.82,11509224,SBUX +2017-12-01,57.5,57.71,56.461000000000006,57.32,12756391,SBUX +2017-12-04,57.54,59.19,57.5,58.76,13302050,SBUX +2017-12-05,59.25,59.68,58.91,59.34,11295644,SBUX +2017-12-06,59.38,59.83,59.23,59.28,8458198,SBUX +2017-12-07,59.12,59.275,58.761,59.14,6033792,SBUX +2017-12-08,58.52,58.845,58.1,58.61,9950491,SBUX +2017-12-11,58.39,59.35,58.29,59.07,10286560,SBUX +2017-12-12,58.99,59.36,58.87,59.27,6042917,SBUX +2017-12-13,59.44,59.89,59.3,59.49,7726299,SBUX +2017-12-14,59.73,60.05,59.44,59.7,8946111,SBUX +2017-12-15,59.25,59.37,58.1574,58.29,22595018,SBUX +2017-12-18,58.44,58.786,57.89,58.03,8751620,SBUX +2017-12-19,58.13,58.575,57.93,58.01,7946435,SBUX +2017-12-20,58.22,58.29,57.69,57.73,7188717,SBUX +2017-12-21,57.94,58.145,57.49,57.58,5974474,SBUX +2017-12-22,57.57,57.91,57.12,57.3,7148723,SBUX +2017-12-26,57.27,57.5799,57.05,57.14,5546208,SBUX +2017-12-27,57.19,57.65,57.18,57.27,4812173,SBUX +2017-12-28,57.47,58.0,57.3,57.81,5044505,SBUX +2017-12-29,57.74,57.97,57.42,57.43,5365646,SBUX +2018-01-02,57.95,58.21,57.48,57.63,7215978,SBUX +2018-01-03,57.93,58.96,57.8,58.71,7478356,SBUX +2018-01-04,58.99,59.41,58.73,58.93,5775921,SBUX +2018-01-05,59.25,59.69,59.07,59.61,6047686,SBUX +2018-01-08,59.48,59.67,58.56,59.31,6335782,SBUX +2018-01-09,59.2,59.47,58.86,59.18,5233353,SBUX +2018-01-10,59.24,60.13,58.855,59.82,8656454,SBUX +2018-01-11,59.76,60.02,59.4541,60.0,5806282,SBUX +2018-01-12,60.4,60.51,59.65,60.4,6989075,SBUX +2018-01-16,60.33,61.1,60.3,60.56,8040748,SBUX +2018-01-17,61.0,61.33,60.52,60.66,8433771,SBUX +2018-01-18,61.43,61.44,60.735,61.09,9170903,SBUX +2018-01-19,61.21,61.46,60.95,61.26,8361853,SBUX 
+2018-01-22,61.04,61.47,60.77,61.41,11945783,SBUX +2018-01-23,61.32,61.91,61.14,61.69,10806783,SBUX +2018-01-24,61.51,61.94,60.2326,60.83,11911867,SBUX +2018-01-25,61.03,61.2,60.4,60.55,16225618,SBUX +2018-01-26,57.94,58.17,56.55,57.99,51851690,SBUX +2018-01-29,57.55,58.35,56.91,57.02,18899867,SBUX +2018-01-30,56.96,57.54,56.74,57.19,14341155,SBUX +2018-01-31,57.23,57.45,56.7,56.81,13118364,SBUX +2018-02-01,56.28,56.42,55.89,56.0,14690146,SBUX +2018-02-02,55.9,56.32,55.7,55.77,15358909,SBUX +2018-02-05,55.53,56.26,54.57,54.69,16059955,SBUX +2018-02-06,53.685,56.06,53.56,55.61,17415065,SBUX +2018-02-07,55.08,55.43,54.44,54.46,13927022,SBUX diff --git a/tf2.0/xor3d.py b/tf2.0/xor3d.py new file mode 100644 index 00000000..4db10096 --- /dev/null +++ b/tf2.0/xor3d.py @@ -0,0 +1,33 @@ +import numpy as np +import matplotlib.pyplot as plt +from mpl_toolkits.mplot3d import Axes3D + + + +def get_label(x, i1, i2, i3): + # x = sequence + if x[i1] < 0 and x[i2] < 0 and x[i3] < 0: + return 1 + if x[i1] < 0 and x[i2] > 0 and x[i3] > 0: + return 1 + if x[i1] > 0 and x[i2] < 0 and x[i3] > 0: + return 1 + if x[i1] > 0 and x[i2] > 0 and x[i3] < 0: + return 1 + return 0 + + +N = 2000 +X = np.random.random((N, 3))*2 - 1 + +Y = np.zeros(N) +for i in range(N): + x = X[i] + y = get_label(x, 0, 1, 2) + Y[i] = y + + +fig = plt.figure() +ax = fig.add_subplot(111, projection='3d') +ax.scatter(X[:,0], X[:,1], X[:,2], c=Y) +plt.show() \ No newline at end of file diff --git a/timeseries/WHERE ARE THE NOTEBOOKS.txt b/timeseries/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..5446ce25 --- /dev/null +++ b/timeseries/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,5 @@ +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. + +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/timeseries/extra_reading.txt b/timeseries/extra_reading.txt new file mode 100644 index 00000000..5e20b991 --- /dev/null +++ b/timeseries/extra_reading.txt @@ -0,0 +1,30 @@ +Estimating Box-Cox power transformation parameter via goodness of fit tests +https://arxiv.org/pdf/1401.3812.pdf + +Linear Regression +https://deeplearningcourses.com/c/data-science-linear-regression-in-python/ + +Logistic Regression +https://deeplearningcourses.com/c/data-science-logistic-regression-in-python/ + +Support Vector Machines +https://deeplearningcourses.com/c/support-vector-machines-in-python + +Random Forests +https://deeplearningcourses.com/c/machine-learning-in-python-random-forest-adaboost + +Deep Learning and Tensorflow 2 +https://deeplearningcourses.com/c/deep-learning-tensorflow-2 + +Gaussian Processes for Regression and Classification +https://www.cs.toronto.edu/~radford/ftp/val6gp.pdf + +How Does Backpropagation Work? 
(In-Depth) +https://deeplearningcourses.com/c/data-science-deep-learning-in-python/ +https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow/ + +Forecasting at Scale (Facebook Prophet) +https://peerj.com/preprints/3190.pdf + +Statistical and Machine Learning forecasting methods: Concerns and ways forward +https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0194889 \ No newline at end of file diff --git a/transformers/WHERE ARE THE NOTEBOOKS.txt b/transformers/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..5446ce25 --- /dev/null +++ b/transformers/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,5 @@ +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. + +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/transformers/extra_reading.txt b/transformers/extra_reading.txt new file mode 100644 index 00000000..718e2963 --- /dev/null +++ b/transformers/extra_reading.txt @@ -0,0 +1,32 @@ +Attention Is All You Need +https://arxiv.org/abs/1706.03762 + +BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding +https://arxiv.org/abs/1810.04805v2 + +Improving Language Understanding by Generative Pre-Training (GPT) +https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf + +Improving Language Understanding with Unsupervised Learning +https://openai.com/blog/language-unsupervised/ + +Language Models are Unsupervised Multitask Learners (GPT-2) +https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf + +Better Language Models and Their Implications +https://openai.com/blog/better-language-models/ + +Language Models are Few-Shot Learners (GPT-3) +https://arxiv.org/abs/2005.14165 + +List of Hugging Face Pipelines for NLP +https://lazyprogrammer.me/list-of-hugging-face-pipelines-for-nlp/ + +BitFit: Simple Parameter-efficient Fine-tuning for Transformer-based Masked Language-models +https://arxiv.org/abs/2106.10199 + +Translation Datasets +https://opus.nlpl.eu/KDE4.php + +Layer Normalization +https://arxiv.org/abs/1607.06450 \ No newline at end of file diff --git a/unsupervised_class/kmeans.py b/unsupervised_class/kmeans.py index b243b426..bb2e2659 100644 --- a/unsupervised_class/kmeans.py +++ b/unsupervised_class/kmeans.py @@ -33,13 +33,12 @@ def cost(X, R, M): def plot_k_means(X, K, max_iter=20, beta=3.0, show_plots=False): N, D = X.shape - M = np.zeros((K, D)) # R = np.zeros((N, K)) exponents = np.empty((N, K)) # initialize M to random - for k in range(K): - M[k] = X[np.random.choice(N)] + initial_centers = np.random.choice(N, K, replace=False) + M = X[initial_centers] costs = [] k = 0 diff --git a/unsupervised_class/kmeans_mnist.py b/unsupervised_class/kmeans_mnist.py index fd7ca76e..b399afab 100644 --- a/unsupervised_class/kmeans_mnist.py +++ b/unsupervised_class/kmeans_mnist.py @@ -16,7 +16,7 @@ import numpy as np import pandas as pd import matplotlib.pyplot as plt -from kmeans import plot_k_means, get_simple_data +from .kmeans import plot_k_means, get_simple_data from datetime import datetime def get_data(limit=None): diff --git a/unsupervised_class/neural_kmeans.py b/unsupervised_class/neural_kmeans.py new file mode 100644 index 00000000..5e9a2d20 --- /dev/null +++
b/unsupervised_class/neural_kmeans.py @@ -0,0 +1,47 @@ +import numpy as np +import matplotlib.pyplot as plt +from kmeans import get_simple_data +from sklearn.preprocessing import StandardScaler + + +# get the data and standardize it +X = get_simple_data() +scaler = StandardScaler() +X = scaler.fit_transform(X) + +# get shapes +N, D = X.shape +K = 3 + +# initialize parameters +W = np.random.randn(D, K) + +# set hyperparameters +n_epochs = 100 +learning_rate = 0.001 +losses = [] + +# training loop +for i in range(n_epochs): + loss = 0 + for j in range(N): + h = W.T.dot(X[j]) # K-length vector + k = np.argmax(h) # winning neuron + + # accumulate loss + loss += (W[:,k] - X[j]).dot(W[:,k] - X[j]) + + # weight update + W[:,k] += learning_rate * (X[j] - W[:,k]) + + losses.append(loss) + + +# plot losses +plt.plot(losses) +plt.show() + +# show cluster assignments +H = np.argmax(X.dot(W), axis=1) +plt.scatter(X[:,0], X[:,1], c=H, alpha=0.5) +plt.show() diff --git a/unsupervised_class/tweets.py b/unsupervised_class/tweets.py index aeb4552a..6ffba008 100644 --- a/unsupervised_class/tweets.py +++ b/unsupervised_class/tweets.py @@ -66,7 +66,7 @@ def filter_tweet(s): # transform the text into a data matrix tfidf = TfidfVectorizer(max_features=100, stop_words=stopwords) -X = tfidf.fit_transform(text).todense() +X = tfidf.fit_transform(text).asformat('array') # subsample for efficiency diff --git a/unsupervised_class2/autoencoder_tf.py b/unsupervised_class2/autoencoder_tf.py index 08c3f156..93d2b87b 100644 --- a/unsupervised_class2/autoencoder_tf.py +++ b/unsupervised_class2/autoencoder_tf.py @@ -12,6 +12,9 @@ from sklearn.utils import shuffle from util import error_rate, getKaggleMNIST +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + class AutoEncoder(object): def __init__(self, D, M, an_id): @@ -23,11 +26,11 @@ def set_session(self, session): self.session = session def build(self, D, M): - self.W = tf.Variable(tf.random_normal(shape=(D, M))) + self.W = tf.Variable(tf.random.normal(shape=(D, M))) self.bh = tf.Variable(np.zeros(M).astype(np.float32)) self.bo = tf.Variable(np.zeros(D).astype(np.float32)) - self.X_in = tf.placeholder(tf.float32, shape=(None, D)) + self.X_in = tf.compat.v1.placeholder(tf.float32, shape=(None, D)) self.Z = self.forward_hidden(self.X_in) # for transform() later self.X_hat = self.forward_output(self.X_in) @@ -36,13 +39,13 @@ def build(self, D, M): # will have numerical stability issues if X_hat = 0 or 1 logits = self.forward_logits(self.X_in) self.cost = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits( + input_tensor=tf.nn.sigmoid_cross_entropy_with_logits( labels=self.X_in, logits=logits, ) ) - self.train_op = tf.train.AdamOptimizer(1e-1).minimize(self.cost) + self.train_op = tf.compat.v1.train.AdamOptimizer(1e-1).minimize(self.cost) # self.train_op = tf.train.MomentumOptimizer(1e-3, momentum=0.9).minimize(self.cost) def fit(self, X, epochs=1, batch_sz=100, show_fig=False): @@ -82,7 +85,7 @@ def forward_hidden(self, X): def forward_logits(self, X): Z = self.forward_hidden(X) - return tf.matmul(Z, tf.transpose(self.W)) + self.bo + return tf.matmul(Z, tf.transpose(a=self.W)) + self.bo def forward_output(self, X): return tf.nn.sigmoid(self.forward_logits(X)) @@ -107,22 +110,22 @@ def set_session(self, session): def build_final_layer(self, D, M, K): # initialize logistic regression layer - self.W = tf.Variable(tf.random_normal(shape=(M, K))) + self.W = tf.Variable(tf.random.normal(shape=(M, K))) self.b = 
tf.Variable(np.zeros(K).astype(np.float32)) - self.X = tf.placeholder(tf.float32, shape=(None, D)) - labels = tf.placeholder(tf.int32, shape=(None,)) + self.X = tf.compat.v1.placeholder(tf.float32, shape=(None, D)) + labels = tf.compat.v1.placeholder(tf.int32, shape=(None,)) self.Y = labels logits = self.forward(self.X) self.cost = tf.reduce_mean( - tf.nn.sparse_softmax_cross_entropy_with_logits( + input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=labels ) ) - self.train_op = tf.train.AdamOptimizer(1e-2).minimize(self.cost) - self.prediction = tf.argmax(logits, 1) + self.train_op = tf.compat.v1.train.AdamOptimizer(1e-2).minimize(self.cost) + self.prediction = tf.argmax(input=logits, axis=1) def fit(self, X, Y, Xtest, Ytest, pretrain=True, epochs=1, batch_sz=100): N = len(X) @@ -184,8 +187,8 @@ def test_pretraining_dnn(): _, D = Xtrain.shape K = len(set(Ytrain)) dnn = DNN(D, [1000, 750, 500], K) - init_op = tf.global_variables_initializer() - with tf.Session() as session: + init_op = tf.compat.v1.global_variables_initializer() + with tf.compat.v1.Session() as session: session.run(init_op) dnn.set_session(session) dnn.fit(Xtrain, Ytrain, Xtest, Ytest, pretrain=True, epochs=10) @@ -198,8 +201,8 @@ def test_single_autoencoder(): _, D = Xtrain.shape autoencoder = AutoEncoder(D, 300, 0) - init_op = tf.global_variables_initializer() - with tf.Session() as session: + init_op = tf.compat.v1.global_variables_initializer() + with tf.compat.v1.Session() as session: session.run(init_op) autoencoder.set_session(session) autoencoder.fit(Xtrain, show_fig=True) diff --git a/unsupervised_class2/rbm_tf.py b/unsupervised_class2/rbm_tf.py index b39015b5..3b3516ce 100644 --- a/unsupervised_class2/rbm_tf.py +++ b/unsupervised_class2/rbm_tf.py @@ -25,13 +25,13 @@ def set_session(self, session): def build(self, D, M): # params - self.W = tf.Variable(tf.random_normal(shape=(D, M)) * np.sqrt(2.0 / M)) + self.W = tf.Variable(tf.random.normal(shape=(D, M)) * np.sqrt(2.0 / M)) # note: without limiting variance, you get numerical stability issues self.c = tf.Variable(np.zeros(M).astype(np.float32)) self.b = tf.Variable(np.zeros(D).astype(np.float32)) # data - self.X_in = tf.placeholder(tf.float32, shape=(None, D)) + self.X_in = tf.compat.v1.placeholder(tf.float32, shape=(None, D)) # conditional probabilities # NOTE: tf.contrib.distributions.Bernoulli API has changed in Tensorflow v1.2 @@ -42,21 +42,21 @@ def build(self, D, M): # probs=p_h_given_v, # dtype=tf.float32 # ) - r = tf.random_uniform(shape=tf.shape(p_h_given_v)) - H = tf.to_float(r < p_h_given_v) + r = tf.random.uniform(shape=tf.shape(input=p_h_given_v)) + H = tf.cast(r < p_h_given_v, dtype=tf.float32) - p_v_given_h = tf.nn.sigmoid(tf.matmul(H, tf.transpose(self.W)) + self.b) + p_v_given_h = tf.nn.sigmoid(tf.matmul(H, tf.transpose(a=self.W)) + self.b) # self.rng_v_given_h = tf.contrib.distributions.Bernoulli( # probs=p_v_given_h, # dtype=tf.float32 # ) - r = tf.random_uniform(shape=tf.shape(p_v_given_h)) - X_sample = tf.to_float(r < p_v_given_h) + r = tf.random.uniform(shape=tf.shape(input=p_v_given_h)) + X_sample = tf.cast(r < p_v_given_h, dtype=tf.float32) # build the objective - objective = tf.reduce_mean(self.free_energy(self.X_in)) - tf.reduce_mean(self.free_energy(X_sample)) - self.train_op = tf.train.AdamOptimizer(1e-2).minimize(objective) + objective = tf.reduce_mean(input_tensor=self.free_energy(self.X_in)) - tf.reduce_mean(input_tensor=self.free_energy(X_sample)) + self.train_op = 
tf.compat.v1.train.AdamOptimizer(1e-2).minimize(objective) # self.train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(objective) # build the cost @@ -64,7 +64,7 @@ def build(self, D, M): # just to observe what happens during training logits = self.forward_logits(self.X_in) self.cost = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits( + input_tensor=tf.nn.sigmoid_cross_entropy_with_logits( labels=self.X_in, logits=logits, ) @@ -96,7 +96,7 @@ def free_energy(self, V): second_term = -tf.reduce_sum( # tf.log(1 + tf.exp(tf.matmul(V, self.W) + self.c)), - tf.nn.softplus(tf.matmul(V, self.W) + self.c), + input_tensor=tf.nn.softplus(tf.matmul(V, self.W) + self.c), axis=1 ) @@ -107,7 +107,7 @@ def forward_hidden(self, X): def forward_logits(self, X): Z = self.forward_hidden(X) - return tf.matmul(Z, tf.transpose(self.W)) + self.b + return tf.matmul(Z, tf.transpose(a=self.W)) + self.b def forward_output(self, X): return tf.nn.sigmoid(self.forward_logits(X)) @@ -128,8 +128,8 @@ def main(): _, D = Xtrain.shape K = len(set(Ytrain)) dnn = DNN(D, [1000, 750, 500], K, UnsupervisedModel=RBM) - init_op = tf.global_variables_initializer() - with tf.Session() as session: + init_op = tf.compat.v1.global_variables_initializer() + with tf.compat.v1.Session() as session: session.run(init_op) dnn.set_session(session) dnn.fit(Xtrain, Ytrain, Xtest, Ytest, pretrain=True, epochs=10) diff --git a/unsupervised_class3/bayes_classifier_gmm.py b/unsupervised_class3/bayes_classifier_gmm.py index b129c522..f3c7dd0f 100644 --- a/unsupervised_class3/bayes_classifier_gmm.py +++ b/unsupervised_class3/bayes_classifier_gmm.py @@ -28,7 +28,7 @@ def fit(self, X, Y): print("Fitting gmm", k) Xk = X[Y == k] self.p_y[k] = len(Xk) - gmm = BayesianGaussianMixture(10) + gmm = BayesianGaussianMixture(n_components=10) gmm.fit(Xk) self.gaussians.append(gmm) # normalize p(y) diff --git a/unsupervised_class3/util.py b/unsupervised_class3/util.py old mode 100644 new mode 100755 index c8194c80..935eb634 --- a/unsupervised_class3/util.py +++ b/unsupervised_class3/util.py @@ -11,7 +11,26 @@ import zipfile import numpy as np import pandas as pd -from scipy.misc import imread, imsave, imresize + +try: + # newer scipy versions no longer provide imread/imsave/imresize + from scipy.misc import imread, imsave, imresize +except ImportError: + from PIL import Image + def imread(fn): + im = Image.open(fn) + return np.array(im) + + def imsave(fn, arr): + im = Image.fromarray(arr) + im.save(fn) + + def imresize(arr, sz): + im = Image.fromarray(arr) + im = im.resize(sz) # resize returns a new image + return np.array(im) + + from glob import glob from tqdm import tqdm from sklearn.utils import shuffle