
OpenCV box segmentation case study


1.https://www.jb51.net/article/164348.htm


  
import cv2
import numpy as np

img = cv2.imread('15_13_57_06.jpg')
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# binarize, smooth, and detect edges
retVal, greyimg = cv2.threshold(grey, 150, 255, cv2.THRESH_BINARY)
greyimg = cv2.GaussianBlur(greyimg, (5, 5), 0)
canny1 = cv2.Canny(greyimg, 100, 150)
# find contours in the binarized image and draw a minimum-area box around each large one
contours, hierarchy = cv2.findContours(greyimg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    if cv2.contourArea(c) > 200:
        rect = cv2.minAreaRect(c)
        # corner points of the minimum-area rectangle
        box = cv2.boxPoints(rect)
        # round the coordinates to integers
        box = np.int0(box)
        cv2.drawContours(grey, [box], 0, (0, 0, 255), 3)
# cv2.drawContours(grey, contours, -1, (0, 0, 255), 3)
cv2.imshow('111', greyimg)
cv2.imshow('222', grey)
cv2.imshow('3331', canny1)
# cv2.imshow('333', dict)
cv2.waitKey(0)

1.1 cv2.threshold: image binarization

https://www.cnblogs.com/yinliang-liang/p/9293310.html


  
# 1. simple (global) threshold
retVal, th1 = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)
# Otsu thresholding (the threshold value is chosen automatically)
ret2, th2 = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# 2. adaptive threshold (mean and Gaussian-weighted neighbourhoods)
th3 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                            cv2.THRESH_BINARY, 3, 2)
th4 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                            cv2.THRESH_BINARY, 3, 2)
cv2.imshow('111', gray)
cv2.imshow('221', th1)
cv2.imshow('222', th2)
cv2.imshow('223', th3)
cv2.imshow('224', th4)

2.cv2.GaussianBlur

Gaussian blur uses a kernel whose weights are normalized, i.e. they sum to 1. The closer a sample is to the kernel center, the larger its weight, and the image is blurred by convolving it with these normalized weights. The kernel is controlled by two parameters, its size and its standard deviation (sigma): a larger sigma spreads the weight outwards, so the center sample gets a relatively smaller weight.
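As a quick check of that claim, OpenCV can generate the normalized 1-D Gaussian kernel directly with cv2.getGaussianKernel; the minimal sketch below (not part of the original example) prints the 5-tap kernel for two sigmas, showing that the coefficients always sum to 1 and that a larger sigma gives a smaller center weight.

import cv2

for sigma in (1.0, 3.0):
    k = cv2.getGaussianKernel(5, sigma)  # 5-tap normalized Gaussian kernel
    print(sigma, k.ravel(), k.sum())     # the coefficients sum to 1
# with the larger sigma the weights are more evenly spread,
# so the center coefficient is smaller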


  
import numpy
import numpy as np
from scipy.stats import norm
from math import floor
import cv2

gaussian_kernel_size = 3  # 3 * 3 kernel
gaussian_kernel_sigma = 2
gaussian = norm(loc=0.0, scale=gaussian_kernel_sigma)
# sample the Gaussian pdf at the kernel offsets (-1, 0, 1 for a size-3 kernel)
pdf = gaussian.pdf(
    numpy.arange(
        -floor(gaussian_kernel_size * 0.5),
        floor(gaussian_kernel_size * 0.5) + 1,
        dtype=numpy.float64
    )
)
pdf /= numpy.sum(pdf) + numpy.finfo(numpy.float64).eps  # don't forget to normalize
data = numpy.array(
    [
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0]
    ],
    dtype=numpy.float64
)
# separable filtering: convolve each row of the padded data with the 1-D kernel...
a = []
for i, d in enumerate(numpy.pad(data, 1)):
    for j in range(5):
        a.append(np.dot(pdf, d[j:j + 3]))
ary = np.array(a).reshape(7, 5).T
# ...then convolve along the other axis
b = []
for i, d in enumerate(ary):
    for j in range(5):
        b.append(np.dot(pdf, d[j:j + 3]))
ary1 = np.array(b).reshape(5, 5)
print(ary1)
# compare with OpenCV's own implementation
cv2.GaussianBlur(data, (gaussian_kernel_size, gaussian_kernel_size), gaussian_kernel_sigma)

3.cv2.Sobel  Gaussian smoothing first, then convolution with the X and Y kernels
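The next section implements this convolution by hand; for orientation, here is a minimal sketch of the equivalent built-in calls (the file name and the ksize/threshold values are placeholders, not taken from the original post):

import cv2

# placeholder file name; any image will do
gray = cv2.imread('box.jpg', cv2.IMREAD_GRAYSCALE)
blur = cv2.GaussianBlur(gray, (5, 5), 0)          # smooth first to suppress noise
gx = cv2.Sobel(blur, cv2.CV_64F, 1, 0, ksize=3)   # derivative in x
gy = cv2.Sobel(blur, cv2.CV_64F, 0, 1, ksize=3)   # derivative in y
mag = cv2.convertScaleAbs(cv2.magnitude(gx, gy))  # gradient magnitude as 8-bit
cv2.imshow('sobel magnitude', mag)
cv2.waitKey(0)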

4.cv2.Canny  


  
import cv2
import numpy as np

img = cv2.imread('16_09_10_40.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, d=0, sigmaColor=100, sigmaSpace=15)
# gray = cv2.GaussianBlur(gray, (5, 5), 0)
rows, cols = gray.shape
gradientX = np.zeros((rows, cols), np.uint8)
gradientY = np.zeros((rows, cols), np.uint8)
gradientXY = np.zeros((rows, cols), np.uint8)
# the gradient direction must be stored as float; a uint8 array would truncate the arctan values
pointDirection = np.zeros((rows, cols), np.float64)
threshold = np.zeros((rows, cols), np.uint8)
highThreshold = 100
lowThreshold = 20
sobelx = [[-1, 0, 1],
          [-1, 0, 1],
          [-1, 0, 1]]
sobely = [[1, 2, 1],
          [0, 0, 0],
          [-1, -2, -1]]
sobelx = np.array(sobelx)
sobely = np.array(sobely)
sobel_x_h, sobel_x_w = sobelx.shape
sobel_y_h, sobel_y_w = sobely.shape
pad_x_img = np.pad(gray, ((sobel_x_h // 2, sobel_x_h // 2), (sobel_x_w // 2, sobel_x_w // 2)))
pad_y_img = np.pad(gray, ((sobel_y_h // 2, sobel_y_h // 2), (sobel_y_w // 2, sobel_y_w // 2)))
gradientX = cv2.filter2D(pad_x_img, -1, sobelx)
# gradient magnitude and direction, pixel by pixel
for i in range(rows):
    for j in range(cols):
        cur_output_x = pad_x_img[i:i + sobel_x_h, j:j + sobel_x_w] * sobelx
        cur_output_y = pad_y_img[i:i + sobel_y_h, j:j + sobel_y_w] * sobely
        conv_sum_x = np.sum(cur_output_x)
        conv_sum_y = np.sum(cur_output_y)
        gradientY[i, j] = conv_sum_y
        gradientX[i, j] = conv_sum_x
        gradientXY[i, j] = np.sqrt(conv_sum_y ** 2 + conv_sum_x ** 2)
        pointDirection[i, j] = np.arctan(conv_sum_y / conv_sum_x)
# non-maximum suppression along the gradient direction
outputImage = gradientXY.copy()
for i in range(1, rows - 1):
    for j in range(1, cols - 1):
        NE = gradientXY[i - 1, j + 1]
        NW = gradientXY[i - 1, j - 1]
        N = gradientXY[i - 1, j]
        W = gradientXY[i, j - 1]
        E = gradientXY[i, j + 1]
        SW = gradientXY[i + 1, j - 1]
        S = gradientXY[i + 1, j]
        SE = gradientXY[i + 1, j + 1]
        theta = pointDirection[i, j]
        if theta <= np.pi / 4 and theta >= 0:
            gp1 = (1 - np.tan(theta)) * E + np.tan(theta) * NE
            gp2 = (1 - np.tan(theta)) * W + np.tan(theta) * SW
        elif theta > np.pi / 4:
            gp1 = (1 - 1 / np.tan(theta)) * N + 1 / np.tan(theta) * NE
            gp2 = (1 - 1 / np.tan(theta)) * S + 1 / np.tan(theta) * SW
        elif theta < 0 and theta >= -np.pi / 4:
            gp1 = (1 - np.tan(-theta)) * E + np.tan(-theta) * SE
            gp2 = (1 - np.tan(-theta)) * W + np.tan(-theta) * NW
        else:
            gp1 = (1 - 1 / np.tan(-theta)) * S + 1 / np.tan(-theta) * SE
            gp2 = (1 - 1 / np.tan(-theta)) * N + 1 / np.tan(-theta) * NW
        if gradientXY[i, j] < gp1 or gradientXY[i, j] < gp2:
            outputImage[i, j] = 0
# double thresholding: keep strong edges as seeds, drop clearly weak pixels
highPoints = []
for i in range(1, rows - 1):
    for j in range(1, cols - 1):
        if outputImage[i, j] >= highThreshold:
            outputImage[i, j] = 255
            highPoints.append([i, j])
        elif outputImage[i, j] < lowThreshold:
            outputImage[i, j] = 0

def DoubleThresholdLinkRecurrent(image, lowThreshold, i, j):
    # recursively attach weak pixels that are connected to a strong edge
    if i <= 0 or j <= 0 or i >= rows - 1 or j >= cols - 1:
        return
    if image[i - 1, j - 1] >= lowThreshold and image[i - 1, j - 1] < 255:
        image[i - 1, j - 1] = 255
        DoubleThresholdLinkRecurrent(image, lowThreshold, i - 1, j - 1)
    if image[i - 1, j] >= lowThreshold and image[i - 1, j] < 255:
        image[i - 1, j] = 255
        DoubleThresholdLinkRecurrent(image, lowThreshold, i - 1, j)
    if image[i - 1, j + 1] >= lowThreshold and image[i - 1, j + 1] < 255:
        image[i - 1, j + 1] = 255
        DoubleThresholdLinkRecurrent(image, lowThreshold, i - 1, j + 1)
    if image[i, j - 1] >= lowThreshold and image[i, j - 1] < 255:
        image[i, j - 1] = 255
        DoubleThresholdLinkRecurrent(image, lowThreshold, i, j - 1)
    if image[i, j + 1] >= lowThreshold and image[i, j + 1] < 255:
        image[i, j + 1] = 255
        DoubleThresholdLinkRecurrent(image, lowThreshold, i, j + 1)
    if image[i + 1, j - 1] >= lowThreshold and image[i + 1, j - 1] < 255:
        image[i + 1, j - 1] = 255
        DoubleThresholdLinkRecurrent(image, lowThreshold, i + 1, j - 1)
    if image[i + 1, j] >= lowThreshold and image[i + 1, j] < 255:
        image[i + 1, j] = 255
        DoubleThresholdLinkRecurrent(image, lowThreshold, i + 1, j)
    if image[i + 1, j + 1] >= lowThreshold and image[i + 1, j + 1] < 255:
        image[i + 1, j + 1] = 255
        DoubleThresholdLinkRecurrent(image, lowThreshold, i + 1, j + 1)

for poinst in highPoints:
    DoubleThresholdLinkRecurrent(outputImage, lowThreshold, poinst[0], poinst[1])
for i in range(1, rows - 1):
    for j in range(1, cols - 1):
        if outputImage[i, j] < 255:
            outputImage[i, j] = 0
# built-in Canny, used for the comparison window below
canny_car = cv2.Canny(gray, 200, 256)
# close then open with a tall kernel and with a wide kernel, then combine the two results
higth = 10
width = 3
kernel1_temp = cv2.getStructuringElement(cv2.MORPH_RECT, (width, higth))
kernel2_temp = cv2.getStructuringElement(cv2.MORPH_RECT, (higth, width))
test_ONE_1 = cv2.morphologyEx(outputImage, cv2.MORPH_CLOSE, kernel1_temp)
test_ONE_2 = cv2.morphologyEx(test_ONE_1, cv2.MORPH_OPEN, kernel1_temp)
test_TWO_1 = cv2.morphologyEx(outputImage, cv2.MORPH_CLOSE, kernel2_temp)
test_TWO_2 = cv2.morphologyEx(test_TWO_1, cv2.MORPH_OPEN, kernel2_temp)
test_combine = test_ONE_2 + test_TWO_2
cv2.imshow('gradientX', gradientX)
cv2.imshow('gradientY', gradientY)
cv2.imshow('gradientXY', gradientXY)
cv2.imshow('outputImage', outputImage)
cv2.imshow('canny_car', canny_car)
cv2.imshow('img', img)
cv2.imshow('test_combine', test_combine)
cv2.waitKey(0)
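One practical caveat with DoubleThresholdLinkRecurrent: it recurses once per linked pixel, so a long connected edge can exceed Python's default recursion limit (about 1000 frames). If that happens, the limit can be raised before the linking loop, for example:

import sys

# allow longer chains of weak-edge pixels to be linked without a RecursionError
sys.setrecursionlimit(100000)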

5.cv2.bilateralFilter  bilateral filtering


  
import cv2
import math
import numpy as np

# spatial Gaussian kernel generator
def gaussian_kernel(gaussian_kernel_size, sigma=1, k=1):
    if sigma == 0:
        sigma = ((gaussian_kernel_size - 1) * 0.5 - 1) * 0.3 + 0.8
    X = np.linspace(-k, k, gaussian_kernel_size)
    Y = np.linspace(-k, k, gaussian_kernel_size)
    x, y = np.meshgrid(X, Y)
    gauss = np.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2))
    return gauss * (1 / np.sum(gauss))

# range (intensity) weight of every neighbour relative to the centre pixel
def pixel_kernel(pix, box, pg):
    box1 = np.zeros(box.shape)
    for i in range(len(box)):
        t = int(math.fabs(int(pix) - int(box[i])))
        box1[i] = pg[t]
    return box1

# precompute the range Gaussian for every possible intensity difference (0..255)
def pixel_gaussion(sigma=30):
    box = np.zeros(256)
    for i in range(0, 256):
        box[i] = np.exp(-(i ** 2) / (2 * sigma ** 2))
    return box

def spilt(a):
    if a % 2 == 0:
        x1 = x2 = a // 2
    else:
        x1 = math.floor(a / 2)
        x2 = a - x1
    return -x1, x2

# collect the neighbourhood around (i, j), replicating the centre pixel at the border
def get_pixel(i, j, gaussian_kernel_size, gray):
    temp = np.zeros(gaussian_kernel_size * gaussian_kernel_size)
    count = 0
    x1, x2 = spilt(gaussian_kernel_size)
    for m in range(x1, x2):
        for n in range(x1, x2):
            if i + m < 0 or i + m > gray.shape[0] - 1 or j + n < 0 or j + n > gray.shape[1] - 1:
                temp[count] = gray[i, j]
            else:
                temp[count] = gray[i + m, j + n]
            count += 1
    return temp

def main():
    # Gaussian kernel_size = 5
    gaussian_kernel_size = 5
    # spatial Gaussian sigma
    s_sigma = 10
    # grey-level (range) Gaussian sigma
    g_sigma = 30
    gk = gaussian_kernel(gaussian_kernel_size, s_sigma)  # spatial Gaussian kernel
    img = cv2.imread('16_09_11_55.jpg')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rows, cols = gray.shape
    mybilateralFilter = np.zeros(gray.shape, np.uint8)
    pg = pixel_gaussion(g_sigma)
    print(np.sum(pg))
    for i in range(rows):
        for j in range(cols):
            box = get_pixel(i, j, gaussian_kernel_size, gray)  # neighbouring pixels
            pk = pixel_kernel(gray[i, j], box, pg)
            box = np.array(box).reshape(gaussian_kernel_size, gaussian_kernel_size)
            pk = np.array(pk).reshape(gaussian_kernel_size, gaussian_kernel_size)
            # combined weight = spatial weight * range weight, normalized by the total weight
            evalue = np.multiply(gk, pk)
            mybilateralFilter[i, j] = int(np.sum(box * evalue) / np.sum(evalue))
    blur = cv2.bilateralFilter(gray, 9, 10, 30)
    cv2.imshow('myblur', mybilateralFilter)
    cv2.imshow('blur', blur)
    cv2.waitKey(0)

if __name__ == "__main__":
    main()
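For reference, the positional call cv2.bilateralFilter(gray, 9, 10, 30) in main() maps onto the signature bilateralFilter(src, d, sigmaColor, sigmaSpace); written with keyword arguments (as the Canny example in section 4 already does) it reads:

import cv2

gray = cv2.imread('16_09_11_55.jpg', cv2.IMREAD_GRAYSCALE)  # same test image as above
# d: neighbourhood diameter, sigmaColor: grey-level (range) sigma, sigmaSpace: spatial sigma
blur = cv2.bilateralFilter(gray, d=9, sigmaColor=10, sigmaSpace=30)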

6.cv2.kmeans  clustering
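Before the full listing, a minimal annotated sketch of the cv2.kmeans call pattern it relies on (variable names and the test image follow the listing below):

import cv2
import numpy as np

srcImage = cv2.imread("15_13_57_06.jpg")
# every pixel becomes one float32 sample with 3 features (B, G, R)
imgVec = np.float32(srcImage.reshape((-1, 3)))
# stop after 10 iterations or once the centers move by less than 1.0
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
# K = 2: one cluster for the foreground (box), one for the background;
# returns the compactness, one label per pixel and the two cluster centers
ret, label, clusCenter = cv2.kmeans(imgVec, 2, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
labelImg = label.reshape(srcImage.shape[:2])  # per-pixel 0/1 label image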


  
import cv2
import numpy as np
import math
import os

srcImage = cv2.imread("15_13_57_06.jpg")

# k-means foreground extraction: flatten the 2-D image into 1-D pixel vectors
def kmean_img(srcImage):
    imgVec = np.float32(srcImage.reshape((-1, 3)))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    flags = cv2.KMEANS_RANDOM_CENTERS
    ret, label, clusCenter = cv2.kmeans(imgVec, 2, None, criteria, 10, flags)
    clusCenter = np.uint8(clusCenter)
    clusResult = clusCenter[label.flatten()]
    imgres = clusResult.reshape((srcImage.shape))
    imgres = cv2.cvtColor(imgres, cv2.COLOR_BGR2GRAY)
    max = int(np.max(imgres))
    min = int(np.min(imgres))
    bwThresh = (max + min) / 2
    _, thresh = cv2.threshold(imgres, bwThresh, 255, cv2.THRESH_BINARY_INV)
    thresh1 = np.zeros(thresh.shape, np.uint8)
    thresh1[thresh == 0] = 255
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
    test_ONE_2 = cv2.dilate(thresh1, kernel)
    # threshRotate = cv2.merge([thresh, thresh, thresh])
    # cv2.imshow("thresh", thresh)
    # cv2.waitKey(0)
    # bounding rectangle of the foreground
    # find contours
    contours, hierarchy = cv2.findContours(test_ONE_2, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    maxconArea = 0
    maxAreaPos = -1
    for i in range(len(contours)):
        if maxconArea < cv2.contourArea(contours[i]):
            maxconArea = cv2.contourArea(contours[i])
            maxAreaPos = i
    objCont = contours[maxAreaPos]
    print(maxconArea)
    rect = cv2.minAreaRect(objCont)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    srcImage = cv2.drawContours(srcImage, [box], -1, (255, 255, 0), 3)
    # for c in contours:
    #     # if cv2.contourArea(c) > 2000:
    #     rect = cv2.minAreaRect(c)
    #     # corner points of the minimum-area rectangle
    #     box = cv2.boxPoints(rect)
    #     # round the coordinates to integers
    #     box = np.int0(box)
    #     cv2.drawContours(srcImage, [box], 0, (255, 0, 0), 3)
    cv2.imshow("srcImage", srcImage)
    # cv2.imshow("thresh1", thresh1)
    cv2.imshow("imgres", imgres)
    # cv2.imshow("test_ONE_2", test_ONE_2)
    cv2.waitKey(0)

def panelAbstract(srcImage):
    # read pic shape
    imgHeight, imgWidth = srcImage.shape[:2]
    imgHeight = int(imgHeight); imgWidth = int(imgWidth)
    # k-means foreground extraction: flatten the 2-D image into 1-D pixel vectors
    imgVec = np.float32(srcImage.reshape((-1, 3)))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    flags = cv2.KMEANS_RANDOM_CENTERS
    ret, label, clusCenter = cv2.kmeans(imgVec, 2, None, criteria, 10, flags)
    clusCenter = np.uint8(clusCenter)
    clusResult = clusCenter[label.flatten()]
    imgres = clusResult.reshape((srcImage.shape))
    imgres = cv2.cvtColor(imgres, cv2.COLOR_BGR2GRAY)
    bwThresh = int((int(np.max(imgres)) + int(np.min(imgres))) / 2)
    _, thresh = cv2.threshold(imgres, bwThresh, 255, cv2.THRESH_BINARY_INV)
    threshRotate = cv2.merge([thresh, thresh, thresh])
    # bounding rectangle of the foreground
    # find contours
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
    thresh1 = np.zeros(thresh.shape, np.uint8)
    thresh1[thresh == 0] = 255
    test_ONE_2 = cv2.dilate(thresh1, kernel)
    contours, hierarchy = cv2.findContours(test_ONE_2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    minvalx = np.max([imgHeight, imgWidth]); maxvalx = 0
    minvaly = np.max([imgHeight, imgWidth]); maxvaly = 0
    maxconArea = 0; maxAreaPos = -1
    for i in range(len(contours)):
        if maxconArea < cv2.contourArea(contours[i]):
            maxconArea = cv2.contourArea(contours[i])
            maxAreaPos = i
    objCont = contours[maxAreaPos]
    rect = cv2.minAreaRect(objCont)
    # box = cv2.boxPoints(rect)
    # box = np.int0(box)
    # srcImage = cv2.drawContours(srcImage, [box], -1, (255, 255, 0), 3)
    # # for c in contours:
    # #     # if cv2.contourArea(c) > 2000:
    # #     rect = cv2.minAreaRect(c)
    # #     # corner points of the minimum-area rectangle
    # #     box = cv2.boxPoints(rect)
    # #     # round the coordinates to integers
    # #     box = np.int0(box)
    # #     cv2.drawContours(srcImage, [box], 0, (255, 0, 0), 3)
    # cv2.imshow("srcImage", srcImage)
    # # cv2.imshow("thresh1", thresh1)
    # cv2.imshow("imgres", imgres)
    # # cv2.imshow("test_ONE_2", test_ONE_2)
    # cv2.waitKey(0)
    # # rotate the foreground upright
    # rect = cv2.minAreaRect(objCont)
    for j in range(len(objCont)):
        minvaly = np.min([minvaly, objCont[j][0][0]])
        maxvaly = np.max([maxvaly, objCont[j][0][0]])
        minvalx = np.min([minvalx, objCont[j][0][1]])
        maxvalx = np.max([maxvalx, objCont[j][0][1]])
    if rect[2] <= -45:
        rotAgl = 90 + rect[2]
    else:
        rotAgl = rect[2]
    if rotAgl == 0:
        panelImg = srcImage[minvalx:maxvalx, minvaly:maxvaly, :]
    else:
        rotCtr = rect[0]
        rotCtr = (int(rotCtr[0]), int(rotCtr[1]))
        rotMdl = cv2.getRotationMatrix2D(rotCtr, rotAgl, 1)
        imgHeight, imgWidth = srcImage.shape[:2]
        # rotate the image
        dstHeight = math.sqrt(imgWidth * imgWidth + imgHeight * imgHeight)
        dstRotimg = cv2.warpAffine(threshRotate, rotMdl, (int(dstHeight), int(dstHeight)))
        dstImage = cv2.warpAffine(srcImage, rotMdl, (int(dstHeight), int(dstHeight)))
        dstRotimg = cv2.cvtColor(dstRotimg, cv2.COLOR_BGR2GRAY)
        cv2.imshow('dstRotimg', dstRotimg)
        cv2.waitKey(0)
        _, dstRotBW = cv2.threshold(dstRotimg, 127, 255, 0)
        contours, hierarchy = cv2.findContours(dstRotBW, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        maxcntArea = 0; maxAreaPos = -1
        for i in range(len(contours)):
            if maxcntArea < cv2.contourArea(contours[i]):
                maxcntArea = cv2.contourArea(contours[i])
                maxAreaPos = i
        x, y, w, h = cv2.boundingRect(contours[maxAreaPos])
        # crop the foreground panel
        panelImg = dstImage[int(y):int(y + h), int(x):int(x + w), :]
    return panelImg

# foreground extraction with the grabCut algorithm
def grabcut_img(img):
    mask = np.zeros(img.shape[:2], np.uint8)
    # background model
    bgdModel = np.zeros((1, 65), np.float64)
    # foreground model
    fgdModel = np.zeros((1, 65), np.float64)
    rect = (0, 0, 512, 640)
    # run grabCut initialised from the rectangle
    cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    img = img * mask2[:, :, np.newaxis]
    cv2.imshow("thresh", img)
    cv2.imshow("mask2", mask2)
    cv2.waitKey(0)

path = r"C:\Users\39314\Desktop\qingdao"
for file in os.listdir(path):
    if file.endswith('jpg'):
        file_path = os.path.join(path, file)
        srcImage = cv2.imread(file_path)
        kmean_img(srcImage)
        # kmean_img(srcImage)
        # a = panelAbstract(srcImage)
        # cv2.imshow('figa', a)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

7.Putting the methods together


  
import cv2
import numpy as np
import math
import os

# First segment the background to crop out the region to inspect,
# then run the Canny edge detector on a bilateral-filtered image.

def kmean_img(srcImage):
    dstimg = np.zeros(srcImage.shape, np.uint8)
    imgVec = np.float32(srcImage.reshape((-1, 3)))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    flags = cv2.KMEANS_RANDOM_CENTERS
    ret, label, clusCenter = cv2.kmeans(imgVec, 2, None, criteria, 10, flags)
    clusCenter = np.uint8(clusCenter)
    clusResult = clusCenter[label.flatten()]
    imgres = clusResult.reshape((srcImage.shape))
    imgres = cv2.cvtColor(imgres, cv2.COLOR_BGR2GRAY)
    max = int(np.max(imgres))
    min = int(np.min(imgres))
    bwThresh = (max + min) / 2
    _, thresh = cv2.threshold(imgres, bwThresh, 255, cv2.THRESH_BINARY_INV)
    thresh1 = np.zeros(thresh.shape, np.uint8)
    thresh1[thresh == 0] = 255
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
    test_ONE_2 = cv2.dilate(thresh1, kernel)
    contours, hierarchy = cv2.findContours(test_ONE_2, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    maxconArea = 0
    maxAreaPos = -1
    for i in range(len(contours)):
        if maxconArea < cv2.contourArea(contours[i]):
            maxconArea = cv2.contourArea(contours[i])
            maxAreaPos = i
    objCont = contours[maxAreaPos]
    rect = cv2.minAreaRect(objCont)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    # copy only the axis-aligned bounding box of the foreground into the output image
    minx = np.min(box[:, 1])
    miny = np.min(box[:, 0])
    maxx = np.max(box[:, 1])
    maxy = np.max(box[:, 0])
    dstimg[minx:maxx, miny:maxy] = srcImage[minx:maxx, miny:maxy]
    return dstimg
    # # srcImage = cv2.drawContours(srcImage, [box], -1, (255, 255, 0), 3)
    # srcImage = cv2.drawContours(srcImage, [box], -1, (255, 255, 0), 3)
    # cv2.imshow("srcImage", srcImage)
    # cv2.imshow("dstimg", dstimg)
    # # cv2.imshow("thresh1", thresh1)
    # cv2.imshow("imgres", imgres)
    # # cv2.imshow("test_ONE_2", test_ONE_2)
    # cv2.waitKey(0)

def canny(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    canny_car = cv2.Canny(blur, 200, 256)
    return canny_car

def bilateralFilter_canny(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.bilateralFilter(gray, 9, 10, 30)
    canny_car = my_canny(blur)
    higth = 11
    width = 3
    kernel1_temp = cv2.getStructuringElement(cv2.MORPH_RECT, (width, higth))
    kernel2_temp = cv2.getStructuringElement(cv2.MORPH_RECT, (higth, width))
    test_ONE_1 = cv2.morphologyEx(canny_car, cv2.MORPH_CLOSE, kernel1_temp)
    test_ONE_2 = cv2.morphologyEx(test_ONE_1, cv2.MORPH_OPEN, kernel1_temp)
    test_TWO_1 = cv2.morphologyEx(canny_car, cv2.MORPH_CLOSE, kernel2_temp)
    test_TWO_2 = cv2.morphologyEx(test_TWO_1, cv2.MORPH_OPEN, kernel2_temp)
    test_combine = cv2.bitwise_and(test_ONE_2, test_TWO_2)
    # canny_car = cv2.Canny(blur, 200, 256)
    return test_combine

def my_canny(gray):
    rows, cols = gray.shape
    gradientY = np.zeros((rows, cols), np.uint8)
    gradientXY = np.zeros((rows, cols), np.uint8)
    # the gradient direction must be stored as float; a uint8 array would truncate the arctan values
    pointDirection = np.zeros((rows, cols), np.float64)
    highThreshold = 100
    lowThreshold = 20
    sobelx = [[-1, 0, 1],
              [-1, 0, 1],
              [-1, 0, 1]]
    sobely = [[1, 2, 1],
              [0, 0, 0],
              [-1, -2, -1]]
    sobelx = np.array(sobelx)
    sobely = np.array(sobely)
    sobel_x_h, sobel_x_w = sobelx.shape
    sobel_y_h, sobel_y_w = sobely.shape
    pad_x_img = np.pad(gray, ((sobel_x_h // 2, sobel_x_h // 2), (sobel_x_w // 2, sobel_x_w // 2)))
    pad_y_img = np.pad(gray, ((sobel_y_h // 2, sobel_y_h // 2), (sobel_y_w // 2, sobel_y_w // 2)))
    gradientX = cv2.filter2D(pad_x_img, -1, sobelx)
    # gradient magnitude and direction, pixel by pixel
    for i in range(rows):
        for j in range(cols):
            cur_output_x = pad_x_img[i:i + sobel_x_h, j:j + sobel_x_w] * sobelx
            cur_output_y = pad_y_img[i:i + sobel_y_h, j:j + sobel_y_w] * sobely
            conv_sum_x = np.sum(cur_output_x)
            conv_sum_y = np.sum(cur_output_y)
            gradientY[i, j] = conv_sum_y
            gradientX[i, j] = conv_sum_x
            gradientXY[i, j] = np.sqrt(conv_sum_y ** 2 + conv_sum_x ** 2)
            pointDirection[i, j] = np.arctan(conv_sum_y / conv_sum_x)
    # non-maximum suppression along the gradient direction
    outputImage = gradientXY.copy()
    for i in range(1, rows - 1):
        for j in range(1, cols - 1):
            NE = gradientXY[i - 1, j + 1]
            NW = gradientXY[i - 1, j - 1]
            N = gradientXY[i - 1, j]
            W = gradientXY[i, j - 1]
            E = gradientXY[i, j + 1]
            SW = gradientXY[i + 1, j - 1]
            S = gradientXY[i + 1, j]
            SE = gradientXY[i + 1, j + 1]
            theta = pointDirection[i, j]
            if theta <= np.pi / 4 and theta >= 0:
                gp1 = (1 - np.tan(theta)) * E + np.tan(theta) * NE
                gp2 = (1 - np.tan(theta)) * W + np.tan(theta) * SW
            elif theta > np.pi / 4:
                gp1 = (1 - 1 / np.tan(theta)) * N + 1 / np.tan(theta) * NE
                gp2 = (1 - 1 / np.tan(theta)) * S + 1 / np.tan(theta) * SW
            elif theta < 0 and theta >= -np.pi / 4:
                gp1 = (1 - np.tan(-theta)) * E + np.tan(-theta) * SE
                gp2 = (1 - np.tan(-theta)) * W + np.tan(-theta) * NW
            else:
                gp1 = (1 - 1 / np.tan(-theta)) * S + 1 / np.tan(-theta) * SE
                gp2 = (1 - 1 / np.tan(-theta)) * N + 1 / np.tan(-theta) * NW
            if gradientXY[i, j] < gp1 or gradientXY[i, j] < gp2:
                outputImage[i, j] = 0
    # double thresholding: keep strong edges as seeds, drop clearly weak pixels
    highPoints = []
    for i in range(1, rows - 1):
        for j in range(1, cols - 1):
            if outputImage[i, j] >= highThreshold:
                outputImage[i, j] = 255
                highPoints.append([i, j])
            elif outputImage[i, j] < lowThreshold:
                outputImage[i, j] = 0

    def DoubleThresholdLinkRecurrent(image, lowThreshold, i, j):
        # recursively attach weak pixels that are connected to a strong edge
        if i <= 0 or j <= 0 or i >= rows - 1 or j >= cols - 1:
            return
        if image[i - 1, j - 1] >= lowThreshold and image[i - 1, j - 1] < 255:
            image[i - 1, j - 1] = 255
            DoubleThresholdLinkRecurrent(image, lowThreshold, i - 1, j - 1)
        if image[i - 1, j] >= lowThreshold and image[i - 1, j] < 255:
            image[i - 1, j] = 255
            DoubleThresholdLinkRecurrent(image, lowThreshold, i - 1, j)
        if image[i - 1, j + 1] >= lowThreshold and image[i - 1, j + 1] < 255:
            image[i - 1, j + 1] = 255
            DoubleThresholdLinkRecurrent(image, lowThreshold, i - 1, j + 1)
        if image[i, j - 1] >= lowThreshold and image[i, j - 1] < 255:
            image[i, j - 1] = 255
            DoubleThresholdLinkRecurrent(image, lowThreshold, i, j - 1)
        if image[i, j + 1] >= lowThreshold and image[i, j + 1] < 255:
            image[i, j + 1] = 255
            DoubleThresholdLinkRecurrent(image, lowThreshold, i, j + 1)
        if image[i + 1, j - 1] >= lowThreshold and image[i + 1, j - 1] < 255:
            image[i + 1, j - 1] = 255
            DoubleThresholdLinkRecurrent(image, lowThreshold, i + 1, j - 1)
        if image[i + 1, j] >= lowThreshold and image[i + 1, j] < 255:
            image[i + 1, j] = 255
            DoubleThresholdLinkRecurrent(image, lowThreshold, i + 1, j)
        if image[i + 1, j + 1] >= lowThreshold and image[i + 1, j + 1] < 255:
            image[i + 1, j + 1] = 255
            DoubleThresholdLinkRecurrent(image, lowThreshold, i + 1, j + 1)

    for poinst in highPoints:
        DoubleThresholdLinkRecurrent(outputImage, lowThreshold, poinst[0], poinst[1])
    for i in range(1, rows - 1):
        for j in range(1, cols - 1):
            if outputImage[i, j] < 255:
                outputImage[i, j] = 0
    return outputImage

srcImage = cv2.imread("15_13_57_06.jpg")
dstimg = kmean_img(srcImage)
canny_car = bilateralFilter_canny(dstimg)
cv2.imshow("canny_car", canny_car)
cv2.waitKey(0)

 


Reposted from: https://blog.csdn.net/qq_33228039/article/details/115913414