Variance: the degree to which the predicted values are spread out.
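For reference, the explained variance score used throughout this post can be written as EVS = 1 - Var(y_true - y_pred) / Var(y_true). A minimal hand-computed sketch with made-up numbers, just to show it matches sklearn's metric:

import numpy as np
from sklearn.metrics import explained_variance_score

y_true = np.array([3.0, 2.5, 4.0, 5.1])
y_pred = np.array([2.8, 2.7, 3.9, 5.3])
# 1 minus the variance of the residuals relative to the variance of the targets
evs_manual = 1 - np.var(y_true - y_pred) / np.var(y_true)
print(round(evs_manual, 4), round(explained_variance_score(y_true, y_pred), 4))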
Decision tree regression
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error, explained_variance_score
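The snippets below assume X_train, X_test, y_train, y_test, and data_columns (the feature names) already exist. A minimal setup sketch, using the California housing dataset as a stand-in since the original data source is not shown here:

from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split

# Assumed stand-in dataset; replace with the actual data used in this post
housing = fetch_california_housing()
X, y = housing.data, housing.target
data_columns = housing.feature_names
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=7)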
# Train the model
# Decision tree regression
# max_depth: maximum depth of the tree; set to 4 so the tree cannot grow arbitrarily deep and overfit
dt_regressor = DecisionTreeRegressor(max_depth=4)
dt_regressor.fit(X_train, y_train)
y_pred_dt = dt_regressor.predict(X_test)
mse = mean_squared_error(y_test, y_pred_dt)
evs = explained_variance_score(y_test, y_pred_dt)
print("#### Decision Tree performance ####")
print("Mean squared error (均方误差/平均平方误差 (越小越好)) = ", round(mse, 2))
print("Explained variance score (解释方差分 (0-1) 1 接近表示解释能力越好) =", round(evs, 2))
# Decision tree feature importances
def plot_feature_importances(feature_importances, title, feature_names):
    # Normalize the importance values relative to the largest one
    feature_importances = 100.0 * (feature_importances / max(feature_importances))
    # Sort the scores from high to low
    index_sorted = np.flipud(np.argsort(feature_importances))
    # Center the labels on the x-axis ticks
    pos = np.arange(index_sorted.shape[0]) + 0.5
    # Draw the bar chart
    plt.figure()
    plt.bar(pos, feature_importances[index_sorted], align='center')
    plt.xticks(pos, [feature_names[i] for i in index_sorted])
    plt.ylabel('Relative Importance')
    plt.title(title)
    plt.show()
plot_feature_importances(dt_regressor.feature_importances_, 'Decision Tree regressor', data_columns)
AdaBoost regression (adaptive boosting)
from sklearn.ensemble import AdaBoostRegressor
# AdaBoost: adaptive boosting
# n_estimators: the number of base learners (boosting rounds)
ab_regressor = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4), n_estimators=400, random_state=7)
ab_regressor.fit(X_train, y_train)
y_pred_ab = ab_regressor.predict(X_test)
mse = mean_squared_error(y_test, y_pred_ab)
evs = explained_variance_score(y_test, y_pred_ab)
print("#### AdaBoost performance ####")
print("Mean squared error (均方误差/平均平方误差 (越小越好)) = ", round(mse, 2))
print("Explained variance score (解释方差分 (0-1) 1 接近表示解释能力越好) =", round(evs, 2))
# Reuse plot_feature_importances() defined in the decision tree section above
plot_feature_importances(ab_regressor.feature_importances_, 'AdaBoost regressor', data_columns)
Random forest regression
from sklearn.ensemble import RandomForestRegressor
# Train the model
# Random forest regression
# n_estimators: number of trees; max_depth: maximum tree depth; min_samples_split: minimum number of samples required to split a node
rf_regressor = RandomForestRegressor(n_estimators=1000, max_depth=4, min_samples_split=2)
rf_regressor.fit(X_train, y_train)
y_pred_rf = rf_regressor.predict(X_test)
mse = mean_squared_error(y_test, y_pred_rf)
evs = explained_variance_score(y_test, y_pred_rf)
print("#### Random Forest Regressor performance ####")
print("Mean squared error (均方误差/平均平方误差 (越小越好)) = ", round(mse, 2))
print("Explained variance score (解释方差分 (0-1) 1 接近表示解释能力越好) =", round(evs, 2))
# Reuse plot_feature_importances() defined in the decision tree section above
plot_feature_importances(rf_regressor.feature_importances_, 'Random Forest regressor', data_columns)
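Impurity-based importances (feature_importances_) can be biased toward high-cardinality features, so permutation importance on the test set is a common cross-check. A minimal sketch, assuming the same fitted rf_regressor:

from sklearn.inspection import permutation_importance

# Shuffle each feature on the test set and measure the drop in score
perm = permutation_importance(rf_regressor, X_test, y_test, n_repeats=10, random_state=7)
for i in np.argsort(perm.importances_mean)[::-1]:
    print(f"{data_columns[i]}: {perm.importances_mean[i]:.4f} +/- {perm.importances_std[i]:.4f}")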