1. 简单线性回归
# Scatter-plot the training data: pizza diameter (inches) vs. price.
import numpy as np
import matplotlib.pyplot as plt

# Five training samples; X is kept 2-D ((n_samples, 1)) because
# scikit-learn's fit/predict below expects a 2-D feature matrix.
X = np.array([[6], [8], [10], [14], [18]])
y = np.array([7, 9, 13, 17.5, 18])

plt.title("pizza diameter vs price")
plt.xlabel('diameter')
plt.ylabel('price')
plt.plot(X, y, 'r.')  # 'r.' = red dot markers
# Fit an ordinary least-squares linear regression on the training data
# and predict the price of a 12-inch pizza.
from sklearn.linear_model import LinearRegression

model = LinearRegression()
model.fit(X, y)

# predict() expects a 2-D array of shape (n_samples, n_features).
test_pizza = np.array([[12]])
pred_price = model.predict(test_pizza)
pred_price
# array([13.68103448])
- 误差
# Report the fit error on the training set.
# NOTE(review): np.mean gives the *mean* squared error (MSE), although the
# printed label "误差平方和" literally says "sum of squared errors".
training_residuals = model.predict(X) - y
print("误差平方和:%.2f" % np.mean(training_residuals ** 2))
误差平方和:1.75 （注：`np.mean` 实际计算的是均方误差 MSE；真正的误差平方和 SSE 应为 8.75）
- 方差
# Sample variance of the diameters, computed by hand and then with NumPy.
x_bar = X.mean()  # mean diameter: 11.2
n = len(X)
# Bessel-corrected variance: divide by N - 1, not N.
variance = np.sum((X - x_bar) ** 2) / (n - 1)
variance  # 23.2
np.var(X, ddof=1)  # NumPy built-in; ddof=1 selects the N-1 divisor
###################
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
- 协方差
# Sample covariance between diameter and price (how the two vary together).
y_bar = y.mean()
# Flatten the (5, 1) deviation column so it multiplies elementwise with
# the 1-D price deviations, then apply the N-1 divisor.
cross_deviations = (X - x_bar).ravel() * (y - y_bar)
covariance = cross_deviations.sum() / (len(X) - 1)
covariance  # 22.65
np.cov(X.transpose(), y)  # full 2x2 matrix: [[var(X), cov], [cov, var(y)]]
array([[23.2 , 22.65],
       [22.65, 24.3 ]])
假设模型为 $y = \alpha + \beta x$，其中 $\beta = \dfrac{\mathrm{cov}(x, y)}{\mathrm{var}(x)} = \dfrac{22.65}{23.2} \approx 0.976$，$\alpha = \bar{y} - \beta \bar{x} \approx 12.9 - 0.976 \times 11.2 \approx 1.966$。
模型为 $y \approx 1.966 + 0.976\,x$（代入直径 12 得预测价格 $\approx 13.68$，与上文一致）。
2. 评价模型
# Evaluate the fitted model on held-out test data using R^2
# (coefficient of determination), scikit-learn's default score.
X_test = np.array([[8], [9], [11], [16], [12]])  # shape (5, 1), as fit() expects
y_test = [11, 8.5, 15, 18, 11]
r_squared = model.score(X_test, y_test)
r_squared  # 0.6620052929422553