HOG stands for Histogram of Oriented Gradients. Introduced at CVPR 2005, it is an image feature extraction algorithm that is typically combined with an SVM classifier for pedestrian detection. HOG is a feature descriptor: it captures an image's gradient structure by computing the magnitude and direction of the gradient at every pixel.
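HOG combined with a linear SVM is also what powers OpenCV's built-in pedestrian detector. Before going into the details, here is a minimal sketch (not part of the original post; 'people.jpg' is a placeholder path) of running that pretrained detector:

import cv2

# load the pretrained HOG + linear SVM pedestrian detector that ships with OpenCV
hog = cv2.HOGDescriptor()  # default 64x128 detection window
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

img = cv2.imread('people.jpg')                      # placeholder image path
rects, weights = hog.detectMultiScale(img, winStride=(8, 8), padding=(8, 8), scale=1.05)
for (x, y, w, h) in rects:                          # draw one box per detection
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow('detections', img)
cv2.waitKey(0)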
HOG Properties
1. Because it is computed from local histograms followed by normalization, HOG stays largely invariant to both geometric and photometric deformations of the image (a small sketch after this list illustrates the photometric part).
2. Subtle movements can be ignored without affecting detection performance.
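To make point 1 concrete, the block normalization described below cancels uniform changes in image brightness or contrast. A small sketch (not from the original post) using scikit-image on a random stand-in image:

import numpy as np
from skimage.feature import hog

image = np.random.rand(64, 64)                      # stand-in grayscale image
fd1 = hog(image, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2))
fd2 = hog(image * 0.5, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2))
# uniform intensity scaling is removed by block normalization,
# so the two descriptors are (almost) identical
print(np.abs(fd1 - fd2).max())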
HOG Computation Steps
1. Convert the input image to grayscale.
2. Normalize the color space with gamma correction.
3. Compute the gradient magnitude and direction of every pixel (a minimal sketch of this step follows the list).
4. Divide the image into cells and compute a gradient histogram for each cell.
5. Group several neighboring cells into a block and compute each block's gradient feature.
6. Concatenate the gradient features of all blocks to obtain the feature descriptor of the image.
7. Feed the image features into a classifier.
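Here is a minimal sketch of step 3 (per-pixel gradient magnitude and direction) using Sobel derivatives. Note that it uses the conventional unsigned 0-180 degree range, while the from-scratch implementation later in this post keeps signed 0-360 degree angles:

import cv2
import numpy as np

gray = cv2.imread('0.jpg', cv2.IMREAD_GRAYSCALE).astype(np.float32)
gx = cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=1)     # horizontal derivative
gy = cv2.Sobel(gray, cv2.CV_32F, 0, 1, ksize=1)     # vertical derivative
magnitude = np.sqrt(gx ** 2 + gy ** 2)              # gradient magnitude per pixel
angle = np.rad2deg(np.arctan2(gy, gx)) % 180        # unsigned gradient direction in [0, 180)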
HOG Parameter Calculation
Computation flow: image -> sliding blocks -> cells
1. Number of blocks
Suppose the image size is 128x128, the block size is 16x16, and the block stride is 8x8.
Number of blocks = ((128 - 16) / 8 + 1) x ((128 - 16) / 8 + 1) = 15 x 15 = 225.
2. Number of cells per block
Suppose the cell size is 8x8.
Cells per block = (16 x 16) / (8 x 8) = 4.
3. Feature dimension per image
Suppose the number of histogram bins = 9.
Feature dimension per image = 225 x 4 x 9 = 8100 (the sketch below cross-checks this figure).
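The 8100-dimensional figure can be cross-checked with scikit-image, which always slides blocks by one cell (an 8-pixel stride here), matching the example above. A small sketch with a random stand-in image:

import numpy as np
from skimage.feature import hog

image = np.random.rand(128, 128)         # stand-in for a 128x128 grayscale image
fd = hog(image,
         orientations=9,                 # 9 histogram bins
         pixels_per_cell=(8, 8),         # 8x8 cells
         cells_per_block=(2, 2),         # 16x16-pixel blocks made of 2x2 cells
         block_norm='L2-Hys')
print(fd.shape)                          # (8100,) = 225 blocks x 4 cells x 9 bins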
HOG Feature Extraction Results
Original image:
HOG feature map:
HOG Code Implementation
1. The Python library scikit-image provides an interface for HOG feature extraction:
from skimage import feature as ft
features = ft.hog(image,                # input image
                  orientations=ori,     # number of bins
                  pixels_per_cell=ppc,  # pixels per cell
                  cells_per_block=cpb,  # cells per block
                  block_norm='L1',      # block norm: one of 'L1', 'L1-sqrt', 'L2', 'L2-Hys'
                  transform_sqrt=True,  # power law compression (also known as gamma correction)
                  feature_vector=True,  # flatten the final vector
                  visualize=False)      # if True, also return the HOG visualization image
Usage example:
from skimage.color import rgb2gray
from skimage.feature import hog
gray = rgb2gray(image) / 255.0
fd = hog(gray, orientations=12, block_norm='L1', pixels_per_cell=[10, 10], cells_per_block=[4, 4], visualize=False, transform_sqrt=True)
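To reproduce a HOG feature map like the one shown above, hog can also return a visualization alongside the feature vector. A small sketch using one of scikit-image's sample images as a stand-in:

import matplotlib.pyplot as plt
from skimage import data
from skimage.color import rgb2gray
from skimage.feature import hog

gray = rgb2gray(data.astronaut())        # any grayscale image works here
fd, hog_image = hog(gray, orientations=9, pixels_per_cell=(8, 8),
                    cells_per_block=(2, 2), visualize=True)
plt.imshow(hog_image, cmap=plt.cm.gray)
plt.show()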
2. Implementing HOG from scratch:
import cv2
import numpy as np
import math
import matplotlib.pyplot as plt
class Hog_descriptor():
    def __init__(self, img, cell_size=16, bin_size=8):
        self.img = img
        # gamma correction: sqrt-normalize the intensities, then rescale to [0, 255]
        self.img = np.sqrt(img / np.max(img))
        self.img = self.img * 255
        self.cell_size = cell_size
        self.bin_size = bin_size
        # width of one histogram bin in degrees (gradient angles span 0-360 here)
        self.angle_unit = 360 / self.bin_size
    def extract(self):
        height, width = self.img.shape
        # gradient magnitude and direction of the whole image
        gradient_magnitude, gradient_angle = self.global_gradient()
        gradient_magnitude = abs(gradient_magnitude)
        cell_gradient_vector = np.zeros((int(height / self.cell_size), int(width / self.cell_size), self.bin_size))
        for i in range(cell_gradient_vector.shape[0]):
            for j in range(cell_gradient_vector.shape[1]):
                # gradient magnitudes inside this cell
                cell_magnitude = gradient_magnitude[i * self.cell_size:(i + 1) * self.cell_size,
                                                    j * self.cell_size:(j + 1) * self.cell_size]
                # gradient directions inside this cell
                cell_angle = gradient_angle[i * self.cell_size:(i + 1) * self.cell_size,
                                            j * self.cell_size:(j + 1) * self.cell_size]
                # accumulate into a per-cell gradient histogram
                cell_gradient_vector[i][j] = self.cell_gradient(cell_magnitude, cell_angle)
        # render the cell histograms as an image
        hog_image = self.render_gradient(np.zeros([height, width]), cell_gradient_vector)
        # group 2x2 neighboring cells into blocks and normalize each block
        hog_vector = []
        for i in range(cell_gradient_vector.shape[0] - 1):
            for j in range(cell_gradient_vector.shape[1] - 1):
                block_vector = []
                block_vector.extend(cell_gradient_vector[i][j])
                block_vector.extend(cell_gradient_vector[i][j + 1])
                block_vector.extend(cell_gradient_vector[i + 1][j])
                block_vector.extend(cell_gradient_vector[i + 1][j + 1])
                # L2-normalize the concatenated block vector
                mag = lambda vector: math.sqrt(sum(i ** 2 for i in vector))
                magnitude = mag(block_vector)
                if magnitude != 0:
                    normalize = lambda block_vector, magnitude: [element / magnitude for element in block_vector]
                    block_vector = normalize(block_vector, magnitude)
                hog_vector.append(block_vector)
        return hog_vector, hog_image
    def global_gradient(self):
        gradient_values_x = cv2.Sobel(self.img, cv2.CV_64F, 1, 0, ksize=5)
        gradient_values_y = cv2.Sobel(self.img, cv2.CV_64F, 0, 1, ksize=5)
        # approximate the gradient magnitude as 0.5*gx + 0.5*gy (abs is taken in extract())
        gradient_magnitude = cv2.addWeighted(gradient_values_x, 0.5, gradient_values_y, 0.5, 0)
        # gradient direction in degrees, in the range [0, 360)
        gradient_angle = cv2.phase(gradient_values_x, gradient_values_y, angleInDegrees=True)
        return gradient_magnitude, gradient_angle
    def cell_gradient(self, cell_magnitude, cell_angle):
        orientation_centers = [0] * self.bin_size
        for i in range(cell_magnitude.shape[0]):
            for j in range(cell_magnitude.shape[1]):
                gradient_strength = cell_magnitude[i][j]
                gradient_angle = cell_angle[i][j]
                min_angle, max_angle, mod = self.get_closest_bins(gradient_angle)
                # split the magnitude between the two nearest bins (linear interpolation)
                orientation_centers[min_angle] += (gradient_strength * (1 - (mod / self.angle_unit)))
                orientation_centers[max_angle] += (gradient_strength * (mod / self.angle_unit))
        return orientation_centers
    def get_closest_bins(self, gradient_angle):
        # wrap around so an angle of exactly 360 degrees still maps to a valid bin
        idx = int(gradient_angle / self.angle_unit) % self.bin_size
        mod = gradient_angle % self.angle_unit
        return idx, (idx + 1) % self.bin_size, mod
    def render_gradient(self, image, cell_gradient):
        cell_width = self.cell_size / 2
        max_mag = np.array(cell_gradient).max()
        for x in range(cell_gradient.shape[0]):
            for y in range(cell_gradient.shape[1]):
                cell_grad = cell_gradient[x][y]
                cell_grad /= max_mag
                angle = 0
                angle_gap = self.angle_unit
                # draw one line per bin, oriented along the bin angle and scaled by its magnitude
                for magnitude in cell_grad:
                    angle_radian = math.radians(angle)
                    x1 = int(x * self.cell_size + magnitude * cell_width * math.cos(angle_radian))
                    y1 = int(y * self.cell_size + magnitude * cell_width * math.sin(angle_radian))
                    x2 = int(x * self.cell_size - magnitude * cell_width * math.cos(angle_radian))
                    y2 = int(y * self.cell_size - magnitude * cell_width * math.sin(angle_radian))
                    cv2.line(image, (y1, x1), (y2, x2), int(255 * math.sqrt(magnitude)))
                    angle += angle_gap
        return image
img = cv2.imread('0.jpg', cv2.IMREAD_GRAYSCALE)
hog = Hog_descriptor(img, cell_size=8, bin_size=9)
vector, image = hog.extract()
# print the shape of the image's feature vector
print(np.array(vector).shape)
plt.imshow(image, cmap=plt.cm.gray)
plt.show()
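Step 7 of the pipeline feeds the HOG features into a classifier. As a rough illustration (not part of the original code), the sketch below trains a linear SVM from scikit-learn on HOG features of the small 8x8 digit images; the cell and block sizes are chosen only to fit such tiny images:

import numpy as np
from skimage.feature import hog
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC

digits = load_digits()                                     # 1797 grayscale 8x8 digit images
features = np.array([hog(img, orientations=9,
                         pixels_per_cell=(4, 4),
                         cells_per_block=(1, 1))
                     for img in digits.images])
X_train, X_test, y_train, y_test = train_test_split(features, digits.target, random_state=0)
clf = LinearSVC().fit(X_train, y_train)                    # linear SVM on the HOG descriptors
print(clf.score(X_test, y_test))                           # held-out accuracy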