SVM: The Complete SMO Algorithm

2019-08-23 10:42:30

This post presents the complete SMO algorithm without kernel functions. Like the simplified version in the previous post, it only works on data sets that are essentially linearly separable, but it runs much faster. The two versions update the alphas and carry out the algebraic optimization in exactly the same way; the only difference in the optimization process is how the alphas are selected.
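Both versions share exactly this inner-loop update, which innerL in the listing below implements for the linear (no-kernel) case. The fragment below is only a shorthand restatement of that step (Xi, Xj, yI, yJ, Ei, Ej, aI_old, aJ_old stand for the i-th and j-th samples, labels, cached errors and old multipliers):

eta    = 2*Xi*Xj.T - Xi*Xi.T - Xj*Xj.T              # must be negative, otherwise the step is skipped
aJ_new = clipAlpha(aJ_old - yJ*(Ei - Ej)/eta, H, L)  # [L, H] is the feasible range of alpha_j given C
aI_new = aI_old + yI*yJ*(aJ_old - aJ_new)            # alpha_i moves by the same amount in the opposite direction

after which b is recomputed from whichever multiplier ends up strictly between 0 and C. The simplified version picks j at random, while the complete version picks the j with the largest |Ei - Ej|, i.e. the largest step, which is why it converges faster.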

The code is as follows:

from numpy import *

def loadDataSet(fileName):
    # load the training set
    dataMat = []; labelMat = []
    fr = open(fileName)
    for line in fr.readlines():
        lineArr = line.strip().split('\t')
        dataMat.append([float(lineArr[0]), float(lineArr[1])])
        labelMat.append(float(lineArr[2]))
    return dataMat,labelMat

class optStruct:
    # store the data and the intermediate state in one object
    def __init__(self,dataMatIn, classLabels, C, toler):  # Initialize the structure with the parameters
        self.X = dataMatIn
        self.labelMat = classLabels
        self.C = C
        self.tol = toler
        self.m = shape(dataMatIn)[0]
        self.alphas = mat(zeros((self.m,1)))
        self.b = 0
        self.eCache = mat(zeros((self.m,2))) # error cache; the first column is 1 when the cached error in the second column is valid
       
def calcEk(oS, k):
    fXk = float(multiply(oS.alphas,oS.labelMat).T*(oS.X*oS.X[k,:].T) + oS.b) # predicted value for sample k
    Ek = fXk - float(oS.labelMat[k])  # error: predicted value minus true label
    return Ek
    
def selectJrand(i,m):
    # pick a random index j that is different from i
    j=i
    while (j==i):
        j = int(random.uniform(0,m))
    return j
    
def selectJ(i, oS, Ei):
    # choose the second alpha (index j) so that the step size |Ei - Ej| is as large as possible
    maxK = -1
    maxDeltaE = 0 # tracks the largest |Ei - Ek| seen so far; start with as small a value as possible
    Ej = 0
    oS.eCache[i] = [1,Ei]   # error cache; the first column is 1 when the cached error in the second column is valid
    validEcacheList = nonzero(oS.eCache[:,0].A)[0] # row indices of the entries with a valid cached error
    '''
    m.A converts the matrix m to an ndarray
    >>> x = np.array([[1,0,0], [0,2,0], [1,1,0]])
    >>> x
    array([[1, 0, 0],
           [0, 2, 0],
           [1, 1, 0]])
    >>> np.nonzero(x) # returns a tuple: the row indices and the column indices of the nonzero elements
    (array([0, 1, 2, 2]), array([0, 1, 0, 1]))
    '''
    if (len(validEcacheList)) > 1:
        for k in validEcacheList:   # loop over the valid cache entries to find the largest delta E
            if k == i:
                continue #don't calc for i, waste of time
            Ek = calcEk(oS, k)
            deltaE = abs(Ei - Ek)
            if (deltaE > maxDeltaE):
                maxK = k
                maxDeltaE = deltaE
                Ej = Ek
        return maxK, Ej
    else:   # no other valid cached error yet (first pass), so pick a random j different from i
        j = selectJrand(i, oS.m)
        Ej = calcEk(oS, j)
    return j, Ej
    
def updateEk(oS, k): # after any alpha has changed, recompute its error and write it to the cache
    Ek = calcEk(oS, k)
    oS.eCache[k] = [1,Ek]
   
def clipAlpha(aj,H,L):
    # clip alpha_j so that it lies within [L, H]
    if aj > H:
        aj = H
    if L > aj:
        aj = L
    return aj
    
def innerL(i, oS):
    Ei = calcEk(oS, i)
    # optimize only if alpha_i violates the KKT conditions by more than the tolerance
    if ((oS.labelMat[i]*Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i]*Ei > oS.tol) and (oS.alphas[i] > 0)):
        j,Ej = selectJ(i, oS, Ei) # differs from the simplified version: j is chosen heuristically rather than at random
        alphaIold = oS.alphas[i].copy(); alphaJold = oS.alphas[j].copy();
        if (oS.labelMat[i] != oS.labelMat[j]):
            L = max(0, oS.alphas[j] - oS.alphas[i])
            H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])
        if L==H:
            return 0
        eta = 2.0 * oS.X[i,:]*oS.X[j,:].T - oS.X[i,:]*oS.X[i,:].T - oS.X[j,:]*oS.X[j,:].T
        if eta >= 0:
            return 0
        oS.alphas[j] -= oS.labelMat[j]*(Ei - Ej)/eta
        oS.alphas[j] = clipAlpha(oS.alphas[j],H,L)
        updateEk(oS, j) # write the new error for j to the cache
        if (abs(oS.alphas[j] - alphaJold) < 0.00001):
            #print ("j not moving enough")
            return 0
        oS.alphas[i] += oS.labelMat[j]*oS.labelMat[i]*(alphaJold - oS.alphas[j]) # alpha_i changes by the same amount as alpha_j, in the opposite direction
        updateEk(oS, i) # write the new error for i to the cache
        b1 = oS.b - Ei- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.X[i,:]*oS.X[i,:].T - oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.X[i,:]*oS.X[j,:].T
        b2 = oS.b - Ej- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.X[i,:]*oS.X[j,:].T - oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.X[j,:]*oS.X[j,:].T
        if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]): oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]): oS.b = b2
        else: oS.b = (b1 + b2)/2.0
        return 1 # one pair of alphas was changed
    else: return 0 # KKT conditions already satisfied within the tolerance
    
def smoP(dataMatIn, classLabels, C, toler, maxIter):
    # SMO (sequential minimal optimization), one implementation of SVM training
    # the complete version, but without kernel functions, so it only suits data sets that are essentially linearly separable
    oS = optStruct(mat(dataMatIn),mat(classLabels).transpose(),C,toler)
    iter_ = 0
    entireSet = True; alphaPairsChanged = 0
    while (iter_ < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):
        alphaPairsChanged = 0
        if entireSet:   # pass over every sample
            for i in range(oS.m):       
                alphaPairsChanged += innerL(i,oS)
                #print ("fullSet, iter: %d i:%d, pairs changed %d" % (iter_,i,alphaPairsChanged))
            iter_ += 1
        else:
            # row indices of the alphas strictly between 0 and C (excluding both endpoints)
            nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
           
            # pass over all the non-bound alphas (neither 0 nor C)
            for i in nonBoundIs:
                alphaPairsChanged += innerL(i,oS)
                #print ("non-bound, iter: %d i:%d, pairs changed %d" % (iter_,i,alphaPairsChanged))
            iter_ += 1
        if entireSet: entireSet = False #toggle entire set loop
        elif (alphaPairsChanged == 0): entireSet = True 
        print( "iteration number: %d" % iter_)
    return oS.b,oS.alphas
    
def calcWs(alphas,dataArr,classLabels): # compute the weight vector w from the alphas
    X = mat(dataArr); labelMat = mat(classLabels).transpose()
    m,n = shape(X)
    w = zeros((n,1))
    for i in range(m):
        w += multiply(alphas[i]*labelMat[i],X[i,:].T)
    return w
    
def classfy(Xi, w, b): # predict the class of one sample
    y = Xi*w + b
    return 1 if y>0 else -1
    
    
dataArr, labelArr = loadDataSet("testSet.txt")
#print(labelArr)
# the constant C has to keep every sample's margin no smaller than 1.0 while also making the classification margin as wide as possible; it balances the two
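# toler is the KKT-violation tolerance checked in innerL; maxIter caps the number of outer-loop passes in smoP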
b, alphas = smoP(dataArr, labelArr, C=0.6, toler =0.001, maxIter=40)
w = calcWs(alphas, dataArr, labelArr)
print("bias b:\n", b,"\n")
#print("alphas:\n", alphas,"\n")
print("weight vector w:\n", w,"\n") # the prediction is Y = Xi * w + b
print("support vectors:")
for i in range(len(alphas)):
    if alphas[i]> 0: # the support vectors are the samples with a nonzero alpha
        print(dataArr[i], labelArr[i])
print("predicted class of the first sample: ",end ='')
print(classfy(mat(dataArr)[0], w,b))
m, n = mat(dataArr).shape
Y_predict = zeros(m)
for i in range(m):
    x = mat(dataArr)[i]
    Y_predict[i] = classfy(x, w,b)
print(Y_predict == array(labelArr))
accuracy = (Y_predict == array(labelArr)).sum() / m # prediction accuracy on the training set
print("training accuracy:", accuracy)
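As an optional check, the two-dimensional training set and the learned separating line w[0]*x1 + w[1]*x2 + b = 0 can be plotted. The sketch below assumes matplotlib is installed; it is appended after the script above and reuses dataArr, labelArr, w and b:

import matplotlib.pyplot as plt

xArr = array(dataArr)
plt.scatter(xArr[:, 0], xArr[:, 1], c=array(labelArr))   # color each point by its label (+1 / -1)
x1 = arange(xArr[:, 0].min(), xArr[:, 0].max(), 0.1)
x2 = (-float(b) - w[0, 0] * x1) / w[1, 0]                # solve w[0]*x1 + w[1]*x2 + b = 0 for x2
plt.plot(x1, x2)                                         # the separating hyperplane (a line in 2-D)
plt.show()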
