# Virtual-mirror pose app: streams the webcam in the browser, runs MediaPipe
# BlazePose (GHUM, model_complexity=2) on every frame, overlays the detected
# skeleton on the live feed, and labels each bone segment with an estimated
# real-world length in cm, using a 35 cm shoulder width as the scale reference
# for distances computed from the 3D (x, y, z) landmarks.
import streamlit as st
import mediapipe as mp
import cv2
import numpy as np
from streamlit_webrtc import webrtc_streamer, WebRtcMode, RTCConfiguration
import av
import math
# Initialize MediaPipe Pose
# BlazePose GHUM solution; one module-level graph instance.
# NOTE(review): this `pose` object is shared with the webrtc frame callback,
# which may run on a worker thread — confirm thread-safety / consider a
# per-processor instance.
mp_pose = mp.solutions.pose
pose = mp_pose.Pose(
    static_image_mode=False,       # video mode: track landmarks across frames
    model_complexity=2,            # 2 = heaviest (GHUM) model, most accurate
    enable_segmentation=False,     # no person-segmentation mask needed
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5
)
# Drawing helpers for rendering landmarks/connections onto frames.
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
# Constants
# Assumed real-world shoulder-to-shoulder span used to convert normalized
# landmark distances into centimeters.
REFERENCE_SHOULDER_LENGTH_CM = 35.0 # Reference shoulder length in cm
def calculate_distance(point1, point2):
    """Return the Euclidean (straight-line) distance between two 3D points.

    Each argument only needs ``x``, ``y`` and ``z`` attributes, as MediaPipe
    landmarks provide. The result is in the same units as the coordinates
    (normalized image units for pose landmarks).
    """
    return math.hypot(
        point1.x - point2.x,
        point1.y - point2.y,
        point1.z - point2.z,
    )
def calculate_scale_factor(landmarks):
    """Derive a cm-per-unit conversion factor from the shoulder span.

    The 3D distance between the two shoulder landmarks (in MediaPipe's
    normalized coordinate units, not pixels) is assumed to correspond to
    REFERENCE_SHOULDER_LENGTH_CM real centimeters. Returns 1 as a neutral
    fallback when the measured span is zero (degenerate detection).
    """
    lm = landmarks.landmark
    shoulder_span = calculate_distance(
        lm[mp_pose.PoseLandmark.LEFT_SHOULDER],
        lm[mp_pose.PoseLandmark.RIGHT_SHOULDER],
    )
    if shoulder_span > 0:
        return REFERENCE_SHOULDER_LENGTH_CM / shoulder_span
    return 1
def draw_measurements(image, landmarks, scale_factor):
    """Draw skeleton and measurements on the image.

    Renders the full MediaPipe pose skeleton onto ``image`` (BGR ndarray,
    modified in place), then annotates a fixed set of limb segments with an
    estimated real-world length: normalized 3D landmark distance multiplied
    by ``scale_factor`` (cm per normalized unit).
    """
    # Define connections we want to measure:
    # (start landmark, end landmark, human-readable label).
    # NOTE(review): the label is currently unused when rendering — only the
    # cm value is drawn; confirm whether labels should appear on screen.
    connections = [
        (mp_pose.PoseLandmark.LEFT_SHOULDER, mp_pose.PoseLandmark.RIGHT_SHOULDER, 'Shoulder width'),
        (mp_pose.PoseLandmark.LEFT_SHOULDER, mp_pose.PoseLandmark.LEFT_ELBOW, 'Left upper arm'),
        (mp_pose.PoseLandmark.RIGHT_SHOULDER, mp_pose.PoseLandmark.RIGHT_ELBOW, 'Right upper arm'),
        (mp_pose.PoseLandmark.LEFT_ELBOW, mp_pose.PoseLandmark.LEFT_WRIST, 'Left forearm'),
        (mp_pose.PoseLandmark.RIGHT_ELBOW, mp_pose.PoseLandmark.RIGHT_WRIST, 'Right forearm'),
        (mp_pose.PoseLandmark.LEFT_HIP, mp_pose.PoseLandmark.RIGHT_HIP, 'Hip width'),
        (mp_pose.PoseLandmark.LEFT_HIP, mp_pose.PoseLandmark.LEFT_KNEE, 'Left thigh'),
        (mp_pose.PoseLandmark.RIGHT_HIP, mp_pose.PoseLandmark.RIGHT_KNEE, 'Right thigh'),
        (mp_pose.PoseLandmark.LEFT_KNEE, mp_pose.PoseLandmark.LEFT_ANKLE, 'Left shin'),
        (mp_pose.PoseLandmark.RIGHT_KNEE, mp_pose.PoseLandmark.RIGHT_ANKLE, 'Right shin'),
    ]
    # Draw skeleton (all landmarks + default pose connection styling).
    mp_drawing.draw_landmarks(
        image,
        landmarks,
        mp_pose.POSE_CONNECTIONS,
        landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style()
    )
    # Draw measurements
    for start_point, end_point, label in connections:
        start = landmarks.landmark[start_point]
        end = landmarks.landmark[end_point]
        # Calculate real-world distance: 3D distance in normalized units,
        # converted to cm via the shoulder-derived scale factor.
        distance_pixels = calculate_distance(start, end)
        distance_cm = distance_pixels * scale_factor
        # Calculate midpoint for text placement — normalized midpoint mapped
        # to pixel coordinates (shape[1] = width, shape[0] = height).
        x_mid = int((start.x + end.x) * image.shape[1] / 2)
        y_mid = int((start.y + end.y) * image.shape[0] / 2)
        # Draw measurement text (white, small font, thickness 2).
        cv2.putText(
            image,
            f'{distance_cm:.1f}cm',
            (x_mid, y_mid),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (255, 255, 255),
            2
        )
class VideoProcessor:
    """streamlit-webrtc video callback: overlays skeleton + segment lengths.

    Each processor owns its own MediaPipe ``Pose`` instance. The pose graph
    is stateful (it tracks landmarks between frames) and ``recv`` runs on a
    streamlit-webrtc worker thread while Streamlit reruns the script, so a
    module-level shared instance risks cross-thread use and stale tracking
    state; per-processor ownership avoids both.
    """

    def __init__(self):
        # Same configuration as the app's defaults: BlazePose GHUM heavy
        # model, video mode with inter-frame tracking enabled.
        self._pose = mp_pose.Pose(
            static_image_mode=False,
            model_complexity=2,
            enable_segmentation=False,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5,
        )

    def recv(self, frame):
        """Annotate one incoming frame and return it for display.

        Parameters:
            frame: av.VideoFrame from the browser camera (BGR content).
        Returns:
            av.VideoFrame with the skeleton and cm measurements drawn in.
        """
        img = frame.to_ndarray(format="bgr24")
        # Process the image with MediaPipe Pose (expects RGB; frame is BGR).
        results = self._pose.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        if results.pose_landmarks:
            # Calculate scale factor based on shoulder width, then draw the
            # skeleton and per-segment measurements in place.
            scale_factor = calculate_scale_factor(results.pose_landmarks)
            draw_measurements(img, results.pose_landmarks, scale_factor)
        return av.VideoFrame.from_ndarray(img, format="bgr24")
# --- Streamlit page layout ---
st.title("Real-time Pose Measurement")
st.write("Stand in front of the camera to see your skeletal measurements!")
# WebRTC configuration
# Public Google STUN server so peers behind NAT can negotiate a connection.
RTC_CONFIGURATION = RTCConfiguration(
    {"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]}
)
# Create WebRTC streamer
# SENDRECV: the browser sends camera frames and receives the annotated
# stream back; frames are routed through VideoProcessor.recv.
webrtc_ctx = webrtc_streamer(
    key="pose-measurement",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration=RTC_CONFIGURATION,
    video_processor_factory=VideoProcessor,
    media_stream_constraints={"video": True, "audio": False},  # camera only, no mic
    async_processing=True,
)
# End of app. Run with: streamlit run <this_file>.py