build a web app that uses the camera, detects a human, and shows the detected skeleton (like a virtual mirror). The screen should show the camera feed, and the skeleton should be drawn over the camera canvas in real time. Make use of the MediaPipe libraries (MediaPipe BlazePose GHUM model). Show each skeleton line and label it with its REAL length in cm: use a shoulder width of 35 cm as the scale and overlay the values on the screen in real time. Use the 3D points (x, y, z) for the distances.
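The idea is to measure every skeleton segment in MediaPipe's normalized 3D landmark space and convert it to centimetres by treating the detected shoulder distance as 35 cm. As a purely illustrative example (made-up numbers): if the normalized 3D shoulder distance comes out as 0.25, the scale factor is 35 / 0.25 = 140 cm per normalized unit, so a forearm segment measuring 0.18 in normalized space would be reported as 0.18 * 140 ≈ 25 cm. The app below implements this with Streamlit, OpenCV, and MediaPipe Pose.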
import streamlit as st
import mediapipe as mp
import cv2
import numpy as np
# Initialize MediaPipe Pose
mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils
pose = mp_pose.Pose(
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5,
    model_complexity=2  # Full BlazePose GHUM model for better accuracy
)
# Euclidean distance between two landmarks in MediaPipe's normalized 3D space
# (the z coordinate uses roughly the same scale as x, so the axes are comparable)
def calculate_distance(point1, point2):
    return np.sqrt(
        (point1.x - point2.x) ** 2 +
        (point1.y - point2.y) ** 2 +
        (point1.z - point2.z) ** 2
    )
# Scale factor that converts normalized distances to centimetres,
# using a 35 cm shoulder width as the real-world reference
def get_scale_factor(landmarks):
    left_shoulder = landmarks.landmark[mp_pose.PoseLandmark.LEFT_SHOULDER]
    right_shoulder = landmarks.landmark[mp_pose.PoseLandmark.RIGHT_SHOULDER]
    shoulder_distance = calculate_distance(left_shoulder, right_shoulder)
    return 35.0 / shoulder_distance
# Define the connections we want to measure
CONNECTIONS = [
    (mp_pose.PoseLandmark.LEFT_SHOULDER, mp_pose.PoseLandmark.RIGHT_SHOULDER, "Shoulder width"),
    (mp_pose.PoseLandmark.LEFT_SHOULDER, mp_pose.PoseLandmark.LEFT_ELBOW, "Left upper arm"),
    (mp_pose.PoseLandmark.RIGHT_SHOULDER, mp_pose.PoseLandmark.RIGHT_ELBOW, "Right upper arm"),
    (mp_pose.PoseLandmark.LEFT_ELBOW, mp_pose.PoseLandmark.LEFT_WRIST, "Left forearm"),
    (mp_pose.PoseLandmark.RIGHT_ELBOW, mp_pose.PoseLandmark.RIGHT_WRIST, "Right forearm"),
    (mp_pose.PoseLandmark.LEFT_HIP, mp_pose.PoseLandmark.RIGHT_HIP, "Hip width"),
    (mp_pose.PoseLandmark.LEFT_HIP, mp_pose.PoseLandmark.LEFT_KNEE, "Left thigh"),
    (mp_pose.PoseLandmark.RIGHT_HIP, mp_pose.PoseLandmark.RIGHT_KNEE, "Right thigh"),
    (mp_pose.PoseLandmark.LEFT_KNEE, mp_pose.PoseLandmark.LEFT_ANKLE, "Left shin"),
    (mp_pose.PoseLandmark.RIGHT_KNEE, mp_pose.PoseLandmark.RIGHT_ANKLE, "Right shin"),
]
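# Note: mp_pose.POSE_CONNECTIONS (used for drawing below) covers the full
# BlazePose skeleton; the subset above lists only the segments labelled in cm.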
st.title("Real-time Pose Analysis with Measurements")
st.write("Stand in front of the camera to see your skeleton with real-time measurements!")
# Camera input widget (captures one snapshot per interaction;
# see the note on a continuous live feed at the end)
camera = st.camera_input("Pose Analysis Camera")
if camera is not None:
    # Decode the captured frame and convert from BGR to RGB for MediaPipe
    bytes_data = camera.getvalue()
    cv2_img = cv2.imdecode(np.frombuffer(bytes_data, np.uint8), cv2.IMREAD_COLOR)
    image = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)

    # Run pose detection
    results = pose.process(image)

    if results.pose_landmarks:
        # Get scale factor based on shoulder width
        scale_factor = get_scale_factor(results.pose_landmarks)

        # Create a copy of the image to draw on
        annotated_image = image.copy()

        # Draw the pose landmarks and skeleton connections
        mp_drawing.draw_landmarks(
            annotated_image,
            results.pose_landmarks,
            mp_pose.POSE_CONNECTIONS,
            mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=2),
            mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2)
        )

        # Label each measured segment with its estimated real length
        for start_idx, end_idx, label in CONNECTIONS:
            start_point = results.pose_landmarks.landmark[start_idx]
            end_point = results.pose_landmarks.landmark[end_idx]

            # Convert the normalized 3D distance to centimetres
            distance = calculate_distance(start_point, end_point)
            real_distance = distance * scale_factor

            # Midpoint of the segment in pixel coordinates, for text placement
            mid_x = int((start_point.x + end_point.x) * image.shape[1] / 2)
            mid_y = int((start_point.y + end_point.y) * image.shape[0] / 2)

            cv2.putText(
                annotated_image,
                f"{real_distance:.1f}cm",
                (mid_x, mid_y),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (255, 255, 255),
                2
            )

        # Convert back to BGR and display the annotated image
        annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_RGB2BGR)
        st.image(annotated_image, channels="BGR", use_column_width=True)

        # Display the same measurements in a table
        measurements = []
        for start_idx, end_idx, label in CONNECTIONS:
            start_point = results.pose_landmarks.landmark[start_idx]
            end_point = results.pose_landmarks.landmark[end_idx]
            distance = calculate_distance(start_point, end_point)
            real_distance = distance * scale_factor
            measurements.append([label, f"{real_distance:.1f}cm"])
        st.table(measurements)
    else:
        st.warning("No pose detected. Please make sure you are visible in the camera.")
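To run the app locally (the filename app.py and the exact package list below are assumptions, not part of the original):

pip install streamlit mediapipe opencv-python
streamlit run app.py

Note that st.camera_input captures one snapshot per shutter click rather than a continuous video stream. For a true real-time mirror you could swap in the third-party streamlit-webrtc component. A minimal sketch, assuming its video_frame_callback API; the MediaPipe processing and drawing from the snapshot path would go inside the callback:

import av
from streamlit_webrtc import webrtc_streamer

def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
    img = frame.to_ndarray(format="bgr24")  # BGR frame from the browser
    # ... convert to RGB, run pose.process(), and draw the skeleton plus
    # the cm labels exactly as in the snapshot-based code above ...
    return av.VideoFrame.from_ndarray(img, format="bgr24")

webrtc_streamer(key="pose-mirror", video_frame_callback=video_frame_callback)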