From d64a0b1e22ae338314ad535ee12ada730bb914dd Mon Sep 17 00:00:00 2001
From: Ole Schulz-Trieglaff
Date: Sat, 28 Feb 2015 00:06:02 +0000
Subject: [PATCH 1/3] Linear regression in R, to get optimal solution

---
 01-Simple-Linear-Regression/linreg.R | 12 ++++++++++++
 1 file changed, 12 insertions(+)
 create mode 100644 01-Simple-Linear-Regression/linreg.R

diff --git a/01-Simple-Linear-Regression/linreg.R b/01-Simple-Linear-Regression/linreg.R
new file mode 100644
index 0000000..7ebca2c
--- /dev/null
+++ b/01-Simple-Linear-Regression/linreg.R
@@ -0,0 +1,12 @@
+x <- read.table("ex2x.dat")
+y <- read.table("ex2y.dat")
+
+ft <- lm(y[,1]~x[,1])
+ft
+anova(ft)
+
+png(file="age_vs_weight.png",height=600,width=600);
+plot(x[,1],y[,1])
+abline(ft)
+dev.off()
+

From 503e3e05b7497881201d525a819b67d74dd783fc Mon Sep 17 00:00:00 2001
From: Ole Schulz-Trieglaff
Date: Sat, 28 Feb 2015 00:15:25 +0000
Subject: [PATCH 2/3] Gradient descent implemented using numpy

---
 .../linreg.gradient.py | 40 +++++++++++++++++++
 1 file changed, 40 insertions(+)
 create mode 100644 01-Simple-Linear-Regression/linreg.gradient.py

diff --git a/01-Simple-Linear-Regression/linreg.gradient.py b/01-Simple-Linear-Regression/linreg.gradient.py
new file mode 100644
index 0000000..ce4bc64
--- /dev/null
+++ b/01-Simple-Linear-Regression/linreg.gradient.py
@@ -0,0 +1,40 @@
+#!/usr/bin/python
+
+import numpy as np
+
+def gradDescent(x, y, theta, alpha, m, nIter):
+    loss = 0
+    for i in range(0, nIter):
+        h = np.dot(x, theta)
+        loss = (h-y)
+        gradient = np.dot(x.transpose(), loss) / m
+        theta = theta - alpha * gradient
+
+    cost = np.sum(loss ** 2) / (2 * m)
+    print("After %d iterations, cost is %f" % (nIter, cost))
+    return theta
+
+y = np.loadtxt('data/ex2y.dat')
+x = np.loadtxt('data/ex2x.dat')
+
+on = np.ones(np.shape(x))
+# append ones for offset
+x = np.column_stack((on,x))
+
+m,n = np.shape(x)
+numIter = 10000
+
+# starting values
+theta = np.array([5,5])
+# learning rate
+alpha = 0.05
+
+# gradient descent
+theta = gradDescent(x, y, theta, alpha, m, numIter)
+print "theta=",theta
+
+# analytical solution
+t1 = np.linalg.inv(np.dot(x.transpose(),x))
+theta2 = np.dot(np.dot(t1,x.transpose()),y)
+print "theta2=",theta2
+

From 0b0c6c151ddc0bfe54cbb77560492f29f16dcdd2 Mon Sep 17 00:00:00 2001
From: Ole Schulz-Trieglaff
Date: Sat, 28 Feb 2015 00:17:45 +0000
Subject: [PATCH 3/3] Adding ref to machine learning coursera class

---
 01-Simple-Linear-Regression/linreg.R           | 4 ++--
 01-Simple-Linear-Regression/linreg.gradient.py | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/01-Simple-Linear-Regression/linreg.R b/01-Simple-Linear-Regression/linreg.R
index 7ebca2c..7fa4805 100644
--- a/01-Simple-Linear-Regression/linreg.R
+++ b/01-Simple-Linear-Regression/linreg.R
@@ -1,6 +1,6 @@
-x <- read.table("ex2x.dat")
-y <- read.table("ex2y.dat")
+x <- read.table("data/ex2x.dat")
+y <- read.table("data/ex2y.dat")
 
 ft <- lm(y[,1]~x[,1])
 ft
 anova(ft)
diff --git a/01-Simple-Linear-Regression/linreg.gradient.py b/01-Simple-Linear-Regression/linreg.gradient.py
index ce4bc64..2a7834c 100644
--- a/01-Simple-Linear-Regression/linreg.gradient.py
+++ b/01-Simple-Linear-Regression/linreg.gradient.py
@@ -2,6 +2,7 @@
 
 import numpy as np
 
+# Implementation based on Ex. 1 in https://www.coursera.org/course/ml
 def gradDescent(x, y, theta, alpha, m, nIter):
     loss = 0
     for i in range(0, nIter):
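
A quick way to sanity-check the two estimates produced by linreg.gradient.py is to solve the same least-squares problem with np.linalg.lstsq, which avoids the explicit inverse of np.dot(x.transpose(), x) used in the "analytical solution" block. A minimal sketch, not part of the patches above; it assumes the data files live under data/ as in the patched scripts, and the name theta_ls is illustrative:

#!/usr/bin/python

import numpy as np

# load the data and add the intercept column, mirroring linreg.gradient.py
y = np.loadtxt('data/ex2y.dat')
x = np.loadtxt('data/ex2x.dat')
x = np.column_stack((np.ones(np.shape(x)), x))

# least-squares fit without forming an explicit matrix inverse
theta_ls, residuals, rank, sv = np.linalg.lstsq(x, y)
print("theta (lstsq) = %s" % theta_ls)

If gradient descent has converged, both theta and the normal-equation theta2 from the script should be close to theta_ls.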