导入测试数据集
这里使用的是手写体数字识别的数据
# Load the handwritten-digits dataset bundled with scikit-learn.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from sklearn import datasets

digits = datasets.load_digits()
digits.keys()
# dict_keys(['data', 'target', 'target_names', 'images', 'DESCR'])

X = digits.data   # feature matrix: one flattened 8x8 grayscale image per row
X.shape
# (1797, 64)
y = digits.target  # integer labels 0-9
y.shape
# (1797,)
digits.target_names
# array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

# The dataset ships pre-shuffled, so no extra shuffling is required.
y[:100]
# array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1,
#        2, 3, 4, 5, 6, 7, 8, 9, 0, 9, 5, 5, 6, 5, 0, 9, 8, 9, 8, 4, 1, 7,
#        7, 3, 5, 1, 0, 0, 2, 2, 7, 8, 2, 0, 1, 2, 6, 3, 3, 7, 3, 3, 4, 6,
#        6, 6, 4, 9, 1, 5, 0, 9, 5, 2, 8, 2, 0, 0, 1, 7, 6, 3, 2, 1, 7, 4,
#        6, 3, 1, 3, 9, 1, 7, 6, 8, 4, 3, 1])

# Inspect sample #666 (its label is 0).
some_digit = X[666]
y[666]
# 0

# Reshape the flat 64-vector back to 8x8 and render it.
some_digit_image = some_digit.reshape(8, 8)
plt.imshow(some_digit_image, cmap=matplotlib.cm.binary)
plt.show()
可以看到这是数字0,之后我们使用自己封装的函数对数据集进行测试
使用kNN算法进行测试
# Both helpers below are our own implementations (not scikit-learn's).
from model_selection import train_test_split
from kNN import KNNClassifier

# Hold out 20% of the data for testing.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_ratio=0.2)

my_knn_clf = KNNClassifier(k=3)
my_knn_clf.fit(X_train, y_train)
y_predict = my_knn_clf.predict(X_test)

# Accuracy = fraction of test samples whose prediction matches the label.
sum(y_predict == y_test) / len(y_test)
# 0.9916434540389972
使用上述过程即可使用kNN算法(k取3),且对测试集进行预测的准确率达到99.16%以上
封装自己的准确率函数
def accuracy_score(y_true, y_predict):
    """Return the classification accuracy between y_true and y_predict.

    Both arguments are equal-length 1-D arrays of labels; the result is
    the fraction of positions where they agree, in [0.0, 1.0].
    """
    # NOTE: the original blog listing split the assert across two lines
    # without a continuation, which is a SyntaxError; parenthesizing the
    # message keeps it valid.
    assert len(y_true) == len(y_predict), (
        "the size of y_true must be equal to the size of y_predict")
    # Elementwise comparison yields a boolean array; its sum counts hits.
    return np.sum(y_true == y_predict) / len(y_true)
测试一下
# Sanity-check our packaged helper (metrics.py) on the same predictions.
from metrics import accuracy_score
accuracy_score(y_test, y_predict)
# 0.9916434540389972
使用scikit-learn封装的accuracy_score
scikit-learn中也为我们封装好了计算准确率的函数,可以直接调用:
# Same workflow using scikit-learn's built-in split, classifier and metric.
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
# Split into training and test sets; fixed random_state for reproducibility.
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2, random_state = 666)
# Build a kNN classifier with k = 3.
knn_clf = KNeighborsClassifier(n_neighbors=3)
knn_clf.fit(X_train,y_train)
# KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
#                      metric_params=None, n_jobs=None, n_neighbors=3, p=2,
#                      weights='uniform')
y_predict = knn_clf.predict(X_test)
accuracy_score(y_test,y_predict)
# 0.9888888888888889