Skip to content
Merged
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Create house
  • Loading branch information
thelisq authored Dec 2, 2017
commit 0b1cc0ecfceba138297eda77715a43d3d1da647d
98 changes: 98 additions & 0 deletions competitions/getting-started/house
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
import pandas as pd
import numpy as np
from sklearn.linear_model import Lasso,LinearRegression,Ridge,ElasticNet,TheilSenRegressor,HuberRegressor,RANSACRegressor
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor,ExtraTreeRegressor
from sklearn.ensemble import AdaBoostRegressor,ExtraTreesRegressor,GradientBoostingRegressor,RandomForestRegressor
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import itertools
# ---- Load the data and classify feature columns by dtype ----
# train.csv carries the target column 'SalePrice'; test.csv does not,
# so `test.columns` is exactly the shared feature set.
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
# FIX: removed leftover notebook cells (`train.head()`, `test.head()`,
# `test.dtypes['Id']`) whose results were computed and discarded.

# Partition feature names into numeric vs categorical by dtype.
num_columns = [column for column in test.columns
               if test.dtypes[column] != np.dtype('object')]
cate_columns = [column for column in test.columns
                if test.dtypes[column] == np.dtype('object')]
print(num_columns)
print(cate_columns)
# Impute missing values, column by column, mutating the frames in place.
# Numeric columns: each frame is filled with its OWN column median.
# Categorical columns: missing entries become the literal string 'NaN',
# so "missing" later becomes its own dummy-indicator category.
for frame in (train, test):
    for col in num_columns:
        frame[col] = frame[col].fillna(frame[col].median())
    for col in cate_columns:
        frame[col] = frame[col].fillna('NaN')

# One-hot encode the categorical columns.
# `data` stacks train on top of test so each get_dummies call sees the
# union of category levels from both frames, keeping train/test dummy
# columns consistent.
# NOTE(review): after the axis=0 concat the train and test row indices
# overlap (both start at 0). The positional slices t[:len(train)] /
# t[len(train):] plus index alignment in the axis=1 concats appear to
# line rows up correctly here, but this depends on pandas slicing and
# alignment details — verify against the pandas version in use.
data = pd.concat([train,test],axis=0)
for column in cate_columns:
    # Dummies computed on the combined frame, then split back by position.
    t = pd.get_dummies(data[column],prefix=column)
    # len(train) is the row count, which the column-wise concats below do
    # not change, so it stays a valid split point on later iterations.
    train = pd.concat([train,t[:len(train):]],axis=1)
    train.drop(column,axis=1,inplace=True)
    test = pd.concat([test,t[len(train):]],axis=1)
    test.drop(column,axis=1,inplace=True)

# Separate the target from the features and hold out a validation split.
label = train.pop('SalePrice')
X_train, X_test, y_train, y_test = train_test_split(
    train, label, test_size=0.33, random_state=42)

# Candidate regressors, each paired with a display name. The ensemble
# models are given 150 estimators; everything else keeps library defaults.
regs = [
    ('Lasso', Lasso()),
    ('LinearRegression', LinearRegression()),
    ('Ridge', Ridge()),
    ('ElasticNet', ElasticNet()),
    ('TheilSenRegressor', TheilSenRegressor()),
    ('RANSACRegressor', RANSACRegressor()),
    ('HuberRegressor', HuberRegressor()),
    ('SVR', SVR(kernel='linear')),
    ('DecisionTreeRegressor', DecisionTreeRegressor()),
    ('ExtraTreeRegressor', ExtraTreeRegressor()),
    ('AdaBoostRegressor', AdaBoostRegressor(n_estimators=150)),
    ('ExtraTreesRegressor', ExtraTreesRegressor(n_estimators=150)),
    ('GradientBoostingRegressor', GradientBoostingRegressor(n_estimators=150)),
    ('RandomForestRegressor', RandomForestRegressor(n_estimators=150)),
    ('XGBRegressor', XGBRegressor(n_estimators=150)),
]
# Fit every candidate model on the training split and keep its validation
# predictions for the ensemble search below.
preds = []
for reg_name, reg in regs:
    print(reg_name)
    reg.fit(X_train, y_train)
    y_pred = reg.predict(X_test)
    # Guard the upcoming log(): replace negative predictions with the
    # prediction median so np.log stays defined.
    if np.sum(y_pred < 0) > 0:
        print('y_pred have', np.sum(y_pred < 0), 'negative values, we fill it with np.median(y_pred)')
        y_pred[y_pred < 0] = np.median(y_pred)
    # RMSE in log space (root-mean-squared error of log prices).
    score = np.sqrt(mean_squared_error(np.log(y_test), np.log(y_pred)))
    # BUG FIX: `score` was computed and then silently discarded; report the
    # per-model score like the ensemble loops below do.
    print(reg_name, score)
    preds.append([reg_name, y_pred])

# Exhaustive search over equal-weight ensembles: for every subset of the
# fitted models, average their validation predictions and score the blend.
# NOTE: this evaluates 2**len(regs) - 1 combinations in total.
final_results = []
for size in range(1, len(regs) + 1):
    print('Model Amount:', size)
    results = []
    for combo in itertools.combinations(preds, size):
        member_names = [name for name, _ in combo]
        # Accumulate then divide (same order of operations as a running sum).
        blend = 0
        for _, member_pred in combo:
            blend += member_pred
        blend /= size
        score = np.sqrt(mean_squared_error(np.log(y_test), np.log(blend)))
        results.append(['+'.join(member_names), score])
    # Rank all blends of this size, best (lowest RMSE) first.
    results = sorted(results, key=lambda entry: entry[1])
    for blend_name, score in results:
        print(blend_name, score)
    print()
    # Keep only the winning blend for each ensemble size.
    final_results.append(results[0])

# Rank the per-size winners by score and print them, best first.
final_result = sorted(final_results, key=lambda x: x[1])
# BUG FIX: the original iterated the unsorted `final_results`, which made
# the sort above dead code; iterate the sorted list instead.
for model_name, score in final_result:
    print(model_name, score)

# Build the submission: average the real-test-set predictions of three of
# the already-fitted models — indices into `regs`: 0 = Lasso,
# 12 = GradientBoostingRegressor, 14 = XGBRegressor.
# FIX: removed a leftover notebook cell that built an (index, name) listing
# of `regs` and discarded the result.
pred = np.mean([regs[i][1].predict(test) for i in (0, 12, 14)], axis=0)
sub = pd.DataFrame({'Id': test['Id'], 'SalePrice': pred})
# index=False (was the non-idiomatic but equivalent index=None): the
# submission wants only the Id and SalePrice columns, no row index.
sub.to_csv('submission_Universe_fillNaN.csv', index=False)