Extracting SIFT, SURF, and HOG features with OpenCV

2022-05-09 14:59:44

OpenCV exposes these feature extractors through the following constructors (the SIFT and SURF names below come from the old OpenCV 2.4 API):

cv2.SIFT()

cv2.SURF()

cv2.HOGDescriptor()
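
In OpenCV 3.x/4.x the SIFT and SURF constructors were replaced by factory functions; a minimal sketch of the newer creation calls, assuming an OpenCV 4.4+ build (and the contrib package opencv-contrib-python for SURF, which is patented and may be disabled entirely depending on how the build was compiled):

import cv2

sift = cv2.SIFT_create()                 # SIFT is back in the main module since OpenCV 4.4
surf = cv2.xfeatures2d.SURF_create(400)  # contrib-only; 400 is the Hessian threshold
hog = cv2.HOGDescriptor()                # unchanged across versions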

An example using cv2.SIFT (cv2.SURF is used in much the same way):

#coding=utf-8  
import cv2  
import scipy as sp  
  
img1 = cv2.imread('x1.jpg',0) # queryImage  
img2 = cv2.imread('x2.jpg',0) # trainImage  
  
# Initiate SIFT detector  
sift = cv2.SIFT()  
  
# find the keypoints and descriptors with SIFT  
kp1, des1 = sift.detectAndCompute(img1,None)  
kp2, des2 = sift.detectAndCompute(img2,None)  
  
# FLANN parameters  
FLANN_INDEX_KDTREE = 0  
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)  
search_params = dict(checks=50)   # or pass empty dictionary  
flann = cv2.FlannBasedMatcher(index_params,search_params)  
matches = flann.knnMatch(des1,des2,k=2)  
  
print 'matches...',len(matches)  
# Apply ratio test  
good = []  
for m,n in matches:  
    if m.distance < 0.75*n.distance:  
        good.append(m)  
print 'good',len(good)  
# #####################################  
# visualization  
h1, w1 = img1.shape[:2]  
h2, w2 = img2.shape[:2]  
view = sp.zeros((max(h1, h2), w1 + w2, 3), sp.uint8)  # canvas wide enough to hold both images side by side
view[:h1, :w1, 0] = img1  
view[:h2, w1:, 0] = img2  
view[:, :, 1] = view[:, :, 0]  
view[:, :, 2] = view[:, :, 0]  
  
for m in good:  
    # draw the keypoints  
    # print m.queryIdx, m.trainIdx, m.distance  
    color = tuple([sp.random.randint(0, 255) for _ in xrange(3)])  
    #print 'kp1,kp2',kp1,kp2  
    cv2.line(view, (int(kp1[m.queryIdx].pt[0]), int(kp1[m.queryIdx].pt[1])), (int(kp2[m.trainIdx].pt[0] + w1), int(kp2[m.trainIdx].pt[1])), color)  # shift the second keypoint right by w1
  
cv2.imshow("view", view)  
cv2.waitKey()  
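
For comparison, OpenCV 3.x and later expose cv2.drawMatches, which builds the same side-by-side view and draws one line per match in a single call; a minimal sketch reusing img1, img2, kp1, kp2 and good from the snippet above:

# let OpenCV assemble the canvas and draw the match lines itself
out = cv2.drawMatches(img1, kp1, img2, kp2, good, None)
cv2.imshow("drawMatches", out)
cv2.waitKey()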
An example of cv2.HOGDescriptor(); more examples can be found at https://www.programcreek.com/python/example/84776/cv2.HOGDescriptor:
# Method excerpted from a classifier class; read_color_image and Instance are
# helpers defined elsewhere in that project, and `time` is the standard-library module.
def createTrainingInstances(self, images):
    start = time.time()
    hog = cv2.HOGDescriptor()            # default geometry: 64x128 detection window
    instances = []
    for img, label in images:
        img = read_color_image(img)
        img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
        descriptor = hog.compute(img)    # one HOG vector per window position
        if descriptor is None:
            descriptor = []
        else:
            descriptor = descriptor.ravel()
        pairing = Instance(descriptor, label)
        instances.append(pairing)
    end = time.time() - start
    self.training_instances = instances
    print "HOG TRAIN SERIAL: %d images -> %f" % (len(images), end)
# Same as createTrainingInstances, but fills self.testing_instances instead.
def createTestingInstances(self, images):
    start = time.time()
    hog = cv2.HOGDescriptor()
    instances = []
    for img, label in images:
        img = read_color_image(img)
        img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
        descriptor = hog.compute(img)
        if descriptor is None:
            descriptor = []
        else:
            descriptor = descriptor.ravel()
        pairing = Instance(descriptor, label)
        instances.append(pairing)
    end = time.time() - start
    self.testing_instances = instances
    print "HOG TEST SERIAL: %d images -> %f" % (len(images), end)

Another example:

def get_hog(image):
    # winSize = (64,64)
    winSize = (image.shape[1], image.shape[0])
    blockSize = (8,8)
    # blockSize = (16,16)
    blockStride = (8,8)
    cellSize = (8,8)
    nbins = 9
    derivAperture = 1
    winSigma = 4.
    histogramNormType = 0                   # 0 = L2Hys, the only block normalization OpenCV implements
    L2HysThreshold = 0.2
    gammaCorrection = 0
    nlevels = 64
    hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,derivAperture,winSigma,
                            histogramNormType,L2HysThreshold,gammaCorrection,nlevels)
    #compute(img[, winStride[, padding[, locations]]]) -> descriptors
    winStride = (8,8)
    padding = (8,8)
    locations = [] # (10, 10)# ((10,20),)
    hist = hog.compute(image,winStride,padding,locations)
    return hist
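
A usage sketch for get_hog (assuming an image whose width and height are multiples of 8, since winSize is taken from the image itself and must line up with the 8x8 blocks; the file name is just a placeholder):

import cv2

img = cv2.imread('x1.jpg')                  # placeholder path
img = cv2.resize(img, (64, 64))             # any size that is a multiple of 8 works here
hist = get_hog(img)
print(hist.size)                            # descriptor length per window times the number of window positions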