小言_互联网的博客

Opencv项目实战:15 手势虚拟缩放

344人阅读  评论(0)

目录

0、项目介绍

1、项目展示

2、项目搭建

3、项目的代码与讲解

4、项目资源

5、项目总结


0、项目介绍

        本篇将会以HandTrackingModule为模块,这里的模块与之前的有所不同,请按照本篇为准,前面的HandTrackingModule不足以完成本项目,本篇将会通过手势对本人的博客海报进行缩放,具体效果可以看下面的效果展示。

1、项目展示

2、项目搭建

首先在一个文件夹下建立HandTrackingModule.py文件以及gesture_zoom.py,以及一张图片,你可以按照你的喜好选择,建议尺寸不要过大。

在这里用到了食指的索引8,可以完成左右手食指的手势进行缩放。

3、项目的代码与讲解

HandTrackingModule.py:


  
  1. import cv2
  2. import mediapipe as mp
  3. import math
  4. class handDetector:
  5. def __init__( self, mode=False, maxHands=2, detectionCon=0.5, minTrackCon=0.5):
  6. self.mode = mode
  7. self.maxHands = maxHands
  8. self.detectionCon = detectionCon
  9. self.minTrackCon = minTrackCon
  10. self.mpHands = mp.solutions.hands
  11. self.hands = self.mpHands.Hands(static_image_mode=self.mode, max_num_hands=self.maxHands,
  12. min_detection_confidence=self.detectionCon,
  13. min_tracking_confidence=self.minTrackCon)
  14. self.mpDraw = mp.solutions.drawing_utils
  15. self.tipIds = [ 4, 8, 12, 16, 20]
  16. self.fingers = []
  17. self.lmList = []
  18. def findHands( self, img, draw=True, flipType=True):
  19. imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
  20. self.results = self.hands.process(imgRGB)
  21. allHands = []
  22. h, w, c = img.shape
  23. if self.results.multi_hand_landmarks:
  24. for handType, handLms in zip(self.results.multi_handedness, self.results.multi_hand_landmarks):
  25. myHand = {}
  26. ## lmList
  27. mylmList = []
  28. xList = []
  29. yList = []
  30. for id, lm in enumerate(handLms.landmark):
  31. px, py, pz = int(lm.x * w), int(lm.y * h), int(lm.z * w)
  32. mylmList.append([px, py])
  33. xList.append(px)
  34. yList.append(py)
  35. ## bbox
  36. xmin, xmax = min(xList), max(xList)
  37. ymin, ymax = min(yList), max(yList)
  38. boxW, boxH = xmax - xmin, ymax - ymin
  39. bbox = xmin, ymin, boxW, boxH
  40. cx, cy = bbox[ 0] + (bbox[ 2] // 2), \
  41. bbox[ 1] + (bbox[ 3] // 2)
  42. myHand[ "lmList"] = mylmList
  43. myHand[ "bbox"] = bbox
  44. myHand[ "center"] = (cx, cy)
  45. if flipType:
  46. if handType.classification[ 0].label == "Right":
  47. myHand[ "type"] = "Left"
  48. else:
  49. myHand[ "type"] = "Right"
  50. else:
  51. myHand[ "type"] = handType.classification[ 0].label
  52. allHands.append(myHand)
  53. ## draw
  54. if draw:
  55. self.mpDraw.draw_landmarks(img, handLms,
  56. self.mpHands.HAND_CONNECTIONS)
  57. cv2.rectangle(img, (bbox[ 0] - 20, bbox[ 1] - 20),
  58. (bbox[ 0] + bbox[ 2] + 20, bbox[ 1] + bbox[ 3] + 20),
  59. ( 255, 0, 255), 2)
  60. cv2.putText(img, myHand[ "type"], (bbox[ 0] - 30, bbox[ 1] - 30), cv2.FONT_HERSHEY_PLAIN,
  61. 2, ( 255, 0, 255), 2)
  62. if draw:
  63. return allHands, img
  64. else:
  65. return allHands
  66. def fingersUp( self, myHand):
  67. myHandType = myHand[ "type"]
  68. myLmList = myHand[ "lmList"]
  69. if self.results.multi_hand_landmarks:
  70. fingers = []
  71. # Thumb
  72. if myHandType == "Right":
  73. if myLmList[self.tipIds[ 0]][ 0] > myLmList[self.tipIds[ 0] - 1][ 0]:
  74. fingers.append( 1)
  75. else:
  76. fingers.append( 0)
  77. else:
  78. if myLmList[self.tipIds[ 0]][ 0] < myLmList[self.tipIds[ 0] - 1][ 0]:
  79. fingers.append( 1)
  80. else:
  81. fingers.append( 0)
  82. # 4 Fingers
  83. for id in range( 1, 5):
  84. if myLmList[self.tipIds[ id]][ 1] < myLmList[self.tipIds[ id] - 2][ 1]:
  85. fingers.append( 1)
  86. else:
  87. fingers.append( 0)
  88. return fingers
  89. def findDistance( self, p1, p2, img=None):
  90. x1, y1 = p1
  91. x2, y2 = p2
  92. cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
  93. length = math.hypot(x2 - x1, y2 - y1)
  94. info = (x1, y1, x2, y2, cx, cy)
  95. if img is not None:
  96. cv2.circle(img, (x1, y1), 15, ( 255, 0, 255), cv2.FILLED)
  97. cv2.circle(img, (x2, y2), 15, ( 255, 0, 255), cv2.FILLED)
  98. cv2.line(img, (x1, y1), (x2, y2), ( 255, 0, 255), 3)
  99. cv2.circle(img, (cx, cy), 15, ( 255, 0, 255), cv2.FILLED)
  100. return length, info, img
  101. else:
  102. return length, info
  103. def main():
  104. cap = cv2.VideoCapture( 0)
  105. detector = handDetector(detectionCon= 0.8, maxHands= 2)
  106. while True:
  107. # Get image frame
  108. success, img = cap.read()
  109. # Find the hand and its landmarks
  110. hands, img = detector.findHands(img) # with draw
  111. # hands = detector.findHands(img, draw=False) # without draw
  112. if hands:
  113. # Hand 1
  114. hand1 = hands[ 0]
  115. lmList1 = hand1[ "lmList"] # List of 21 Landmark points
  116. bbox1 = hand1[ "bbox"] # Bounding box info x,y,w,h
  117. centerPoint1 = hand1[ 'center'] # center of the hand cx,cy
  118. handType1 = hand1[ "type"] # Handtype Left or Right
  119. fingers1 = detector.fingersUp(hand1)
  120. if len(hands) == 2:
  121. # Hand 2
  122. hand2 = hands[ 1]
  123. lmList2 = hand2[ "lmList"] # List of 21 Landmark points
  124. bbox2 = hand2[ "bbox"] # Bounding box info x,y,w,h
  125. centerPoint2 = hand2[ 'center'] # center of the hand cx,cy
  126. handType2 = hand2[ "type"] # Hand Type "Left" or "Right"
  127. fingers2 = detector.fingersUp(hand2)
  128. # Find Distance between two Landmarks. Could be same hand or different hands
  129. length, info, img = detector.findDistance(lmList1[ 8][ 0: 2], lmList2[ 8][ 0: 2], img) # with draw
  130. # length, info = detector.findDistance(lmList1[8], lmList2[8]) # with draw
  131. # Display
  132. cv2.imshow( "Image", img)
  133. cv2.waitKey( 1)
  134. if __name__ == "__main__":
  135. main()

gesture_zoom.py 


  
  1. import cv2
  2. import mediapipe as mp
  3. import time
  4. import HandTrackingModule as htm
  5. startDist = None
  6. scale = 0
  7. cx, cy = 500, 200
  8. wCam, hCam = 1280, 720
  9. pTime = 0
  10. cap = cv2.VideoCapture( 0)
  11. cap. set( 3, wCam)
  12. cap. set( 4, hCam)
  13. cap. set( 10, 150)
  14. detector = htm.handDetector(detectionCon= 0.75)
  15. while 1:
  16. success, img = cap.read()
  17. handsimformation,img=detector.findHands(img)
  18. img1 = cv2.imread( "1.png")
  19. # img[0:360, 0:260] = img1
  20. if len(handsimformation)== 2:
  21. # print(detector.fingersUp(handsimformation[0]),detector.fingersUp(handsimformation[1]))
  22. #detector.fingersUp(handimformation[0]右手
  23. if detector.fingersUp(handsimformation[ 0]) == [ 1, 1, 1, 0, 0] and \
  24. detector.fingersUp(handsimformation[ 1]) == [ 1, 1, 1 , 0, 0]:
  25. lmList1 = handsimformation[ 0][ 'lmList']
  26. lmList2 = handsimformation[ 1][ 'lmList']
  27. if startDist is None:
  28. #lmList1[8],lmList2[8]右、左手指尖
  29. # length,info,img=detector.findDistance(lmList1[8],lmList2[8], img)
  30. length, info, img = detector.findDistance(handsimformation[ 0][ "center"], handsimformation[ 1][ "center"], img)
  31. startDist=length
  32. length, info, img = detector.findDistance(handsimformation[ 0][ "center"], handsimformation[ 1][ "center"], img)
  33. # length, info, img = detector.findDistance(lmList1[8], lmList2[8], img)
  34. scale= int((length-startDist)// 2)
  35. cx, cy=info[ 4:]
  36. print(scale)
  37. else:
  38. startDist= None
  39. try:
  40. h1, w1, _ = img1.shape
  41. newH, newW = ((h1 + scale) // 2) * 2, ((w1 + scale) // 2) * 2
  42. img1 = cv2.resize(img1, (newW, newH))
  43. img[cy-newH// 2:cy+ newH// 2, cx-newW// 2:cx+newW// 2] = img1
  44. except:
  45. pass
  46. #################打印帧率#####################
  47. cTime = time.time()
  48. fps = 1 / (cTime - pTime)
  49. pTime = cTime
  50. cv2.putText(img, f'FPS: {int(fps)}', ( 40, 50), cv2.FONT_HERSHEY_COMPLEX,
  51. 1, ( 100, 0, 255), 3)
  52. cv2.imshow( "image",img)
  53. k=cv2.waitKey( 1)
  54. if k== 27:
  55. break

前面的类模块,我不做过多的讲解,它的新添加功能,我会在讲解主文件的时候提到。

  • 首先,导入我们需要的模块,第一步先编写打开摄像头的代码,确保摄像头正常,并调节好窗口的设置——宽、高、亮度,并且用htm(HandTrackingModule的缩写,后文同)中的handDetector调高置信度,让我们对手的检测更准确。
  • 其次,用findHands得到手的landmark,我所设定的手势是左右手的大拇指、食指、中指高于其他手指,也就是这六根手指竖起,我们按照[1, 1, 1, 0, 0],[1, 1, 1, 0, 0]来设定,如果你不能确定,请解除下面这行代码的注释进行打印验证;
#print(detector.fingersUp(handsimformation[0]),detector.fingersUp(handsimformation[1]))
  • 然后,在这里有两个handsimformation[0]['lmList'],handsimformation[0]["center"],分别代表我要取食指,和手掌中心点,那么展示的时候是用的中心点,可以按照个人的喜好去选择手掌的索引,startDist=None表示为没有检测到的手时的起始长度,而经过每次迭代后,获得的距离length-起始长度,如果我增大手的距离,我就能得到一个较大的scale,由于打印的scale太大,我不希望它变化太快,所以做了二分后取整,如果得到的是一个负值,那么就缩小图片,那么我们没有检测到手时,就要令startDist=None。
  • 之后来看,info = (x1, y1, x2, y2, cx, cy),根据索引得到中心值,然后,我们来获取当前海报的大小,再加上scale,实现动态的缩放。但在这里要注意,这里进行了先整除2、再乘以2的操作:如果参数是偶数,我们无需理会,但如果遇到奇数就会出现少一个像素点的问题。比如,值为9,整除2后得到的为4,4+4=8<9,所以为了确保正确,加了这一步。加入try...except语句是因为图像超出窗口时会报错,这样超出窗口时这段粘贴代码不生效,回到窗口内时,可以继续操作。
  • 最后,打印出我们的帧率

4、项目资源

GitHub:https://github.com/Auorui/Opencv-project-training/tree/main/Opencv%20project%20training/15%20Gesture%20Zoom%20Picture

5、项目总结

本次项目完成了手势图片的虚拟缩放,如果你喜欢的话可以关注点赞加收藏。如果你们对于其他项目感兴趣,可以进入GitHub中,点击收藏。

 感谢大家的关注,如果你对于本项目较为喜欢,那么我会在评论中看到你哦。


转载:https://blog.csdn.net/m0_62919535/article/details/127714724
查看评论
* 以上用户言论只代表其个人观点,不代表本网站的观点或立场