author     zhang <zch921005@126.com>    2022-06-04 08:48:54 +0800
committer  zhang <zch921005@126.com>    2022-06-04 08:48:54 +0800
commit     44097da7288042e988bcb89f1c6cc817a8e1eec9 (patch)
tree       7c28da29f31ca429b8d27ed7441d650f1195812d /pose
parent     0f885b830ac552bfd357dec74c70e1349434b58b (diff)
0604
Diffstat (limited to 'pose')
-rw-r--r--  pose/1.mp4              bin  0 -> 13892865 bytes
-rw-r--r--  pose/4.jpg              bin  0 -> 333738 bytes
-rw-r--r--  pose/PoseEstimation.py  64
3 files changed, 64 insertions, 0 deletions
diff --git a/pose/1.mp4 b/pose/1.mp4
new file mode 100644
index 0000000..7435e06
--- /dev/null
+++ b/pose/1.mp4
Binary files differ
diff --git a/pose/4.jpg b/pose/4.jpg
new file mode 100644
index 0000000..04f7515
--- /dev/null
+++ b/pose/4.jpg
Binary files differ
diff --git a/pose/PoseEstimation.py b/pose/PoseEstimation.py
new file mode 100644
index 0000000..cdd594a
--- /dev/null
+++ b/pose/PoseEstimation.py
@@ -0,0 +1,64 @@
+# Created by MediaPipe
+# Modified by Augmented Startups 2021
+# Pose-Estimation in 5 Minutes
+# Watch 5 Minute Tutorial at www.augmentedstartups.info/YouTube
+import cv2
+import mediapipe as mp
+import time
+import os  # Note: unused in this script.
+
+mp_drawing = mp.solutions.drawing_utils
+mp_pose = mp.solutions.pose
+mp_holistic = mp.solutions.holistic  # Defined but not used below.
+
+# For static images:
+with mp_pose.Pose(
+ static_image_mode=True,
+ # model_complexity=2,
+ min_detection_confidence=0.5) as pose:
+ image = cv2.imread('4.jpg') # Insert your Image Here
+ image_height, image_width, _ = image.shape
+ # Convert the BGR image to RGB before processing.
+ results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+ # Draw pose landmarks on the image.
+ annotated_image = image.copy()
+ mp_drawing.draw_landmarks(annotated_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
+ cv2.imwrite(r'4.png', annotated_image)
+
+# For webcam input:
+# cap = cv2.VideoCapture(0)
+# For video file input:
+cap = cv2.VideoCapture("1.mp4")
+prevTime = time.time()  # Timestamp of the previous frame, for the FPS readout.
+with mp_pose.Pose(
+ min_detection_confidence=0.5,
+ min_tracking_confidence=0.5) as pose:
+ while cap.isOpened():
+ success, image = cap.read()
+        if not success:
+            print("Reached end of video.")
+            # The input here is a video file, so break (use 'continue' for a live camera).
+            break
+
+ # Convert the BGR image to RGB.
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+ # To improve performance, optionally mark the image as not writeable to
+ # pass by reference.
+ image.flags.writeable = False
+ results = pose.process(image)
+
+ # Draw the pose annotation on the image.
+ image.flags.writeable = True
+ image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+ mp_drawing.draw_landmarks(
+ image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
+ currTime = time.time()
+ fps = 1 / (currTime - prevTime)
+ prevTime = currTime
+ cv2.putText(image, f'FPS: {int(fps)}', (20, 70), cv2.FONT_HERSHEY_PLAIN, 3, (0, 196, 255), 2)
+ cv2.imshow('BlazePose', image)
+ if cv2.waitKey(5) & 0xFF == 27:
+ break
+cap.release()
+# Learn more AI in Computer Vision by Enrolling in our AI_CV Nano Degree:
+# https://bit.ly/AugmentedAICVPRO
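
Editor's note: the script above only draws the landmarks; the coordinates themselves can be read back from results.pose_landmarks. A minimal sketch under the same setup (it reuses '4.jpg' from this commit; the choice of the NOSE landmark is illustrative, not part of the commit):

    import cv2
    import mediapipe as mp

    mp_pose = mp.solutions.pose

    with mp_pose.Pose(static_image_mode=True,
                      min_detection_confidence=0.5) as pose:
        image = cv2.imread('4.jpg')
        results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

        # pose_landmarks is None when no person is detected, so guard first.
        if results.pose_landmarks:
            h, w, _ = image.shape
            nose = results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE]
            # Landmarks are normalized to [0, 1]; scale to pixel coordinates.
            print(f'Nose at ({int(nose.x * w)}, {int(nose.y * h)}), '
                  f'visibility={nose.visibility:.2f}')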
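Editor's note: the FPS shown in the video loop is the instantaneous per-frame value and can jitter heavily. A small sketch of a smoothed alternative (the FPSMeter name and the 0.9 smoothing factor are illustrative, not part of the commit):

    import time

    class FPSMeter:
        """Exponentially smoothed FPS estimate; call update() once per frame."""
        def __init__(self, alpha=0.9):
            self.alpha = alpha       # Smoothing factor (arbitrary choice).
            self.fps = 0.0
            self.prev = time.time()

        def update(self):
            now = time.time()
            inst = 1.0 / max(now - self.prev, 1e-6)  # Guard against zero dt.
            self.prev = now
            self.fps = self.alpha * self.fps + (1 - self.alpha) * inst
            return self.fps

In the loop above, fps = meter.update() would replace the manual currTime/prevTime bookkeeping.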