A web-based interface that evaluates a student's handwritten subjective or theoretical answer sheet, analyzes the answer, and gives individual feedback.
import streamlit as st
import pandas as pd
import numpy as np
import io
import os
import time
import matplotlib.pyplot as plt
from PIL import Image
import cv2
from openai import OpenAI
import base64
import json
# Set page configuration
st.set_page_config(
    page_title="Handwritten Answer Evaluator",
    page_icon="📝",
    layout="wide"
)
# Initialize the OpenAI client with the proxy
base_url = "https://editor.ploomber.io/_api/ai-proxy/v1"
api_key = os.environ["PLOOMBER_API_KEY"]
client = OpenAI(base_url=base_url, api_key=api_key)
# CSS for styling
st.markdown("""
<style>
.main-header {
font-size: 2.5rem;
color: #1E88E5;
text-align: center;
margin-bottom: 1rem;
}
.sub-header {
font-size: 1.5rem;
color: #333;
margin-bottom: 0.5rem;
}
.feedback-box {
background-color: #f8f9fa;
border-radius: 10px;
padding: 20px;
margin-bottom: 20px;
border-left: 5px solid #1E88E5;
}
.score-container {
display: flex;
justify-content: center;
align-items: center;
margin: 20px 0;
}
.score-circle {
width: 100px;
height: 100px;
border-radius: 50%;
background-color: #1E88E5;
color: white;
display: flex;
justify-content: center;
align-items: center;
font-size: 2rem;
font-weight: bold;
margin: 0 auto;
}
.metric-card {
background-color: #f1f7fe;
border-radius: 10px;
padding: 15px;
margin: 10px 0;
border-left: 3px solid #1E88E5;
}
</style>
""", unsafe_allow_html=True)
# Display the header
st.markdown("<h1 class='main-header'>Handwritten Answer Evaluator</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center;'>Upload handwritten answer sheets for AI-powered evaluation and feedback</p>", unsafe_allow_html=True)
# Initialize session state variables
if 'feedback' not in st.session_state:
    st.session_state.feedback = None
if 'score' not in st.session_state:
    st.session_state.score = None
if 'detailed_analysis' not in st.session_state:
    st.session_state.detailed_analysis = None
if 'strengths' not in st.session_state:
    st.session_state.strengths = None
if 'areas_to_improve' not in st.session_state:
    st.session_state.areas_to_improve = None
if 'suggestions' not in st.session_state:
    st.session_state.suggestions = None
if 'evaluation_history' not in st.session_state:
    st.session_state.evaluation_history = []
if 'current_question' not in st.session_state:
    st.session_state.current_question = ""
if 'current_image' not in st.session_state:
    st.session_state.current_image = None
# Function to preprocess the image
def preprocess_image(image):
    # Convert to OpenCV format
    img_array = np.array(image)
    # Convert to grayscale if it's a color image
    if len(img_array.shape) == 3:
        gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
    else:
        gray = img_array
    # Apply adaptive thresholding
    binary = cv2.adaptiveThreshold(
        gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2
    )
    # Invert the image to make text white and background black
    binary = cv2.bitwise_not(binary)
    # Convert back to PIL Image
    enhanced_img = Image.fromarray(binary)
    return enhanced_img
# Function to encode the image to base64
def encode_image(image):
    buffered = io.BytesIO()
    image.save(buffered, format="JPEG")
    return base64.b64encode(buffered.getvalue()).decode("utf-8")
# Function to analyze the handwritten answer
def analyze_answer(image, question):
    enhanced_img = preprocess_image(image)
    base64_image = encode_image(enhanced_img)
    # Prepare the prompt for the OpenAI API
    prompt = f"""
    You are an expert teacher evaluating a student's handwritten answer.
    Question: {question}
    Analyze the handwritten answer image provided and:
    1. Transcribe the handwritten text accurately
    2. Evaluate the answer for correctness, completeness, and clarity
    3. Assign a score out of 10
    4. Provide detailed feedback on strengths and areas for improvement
    5. Give constructive suggestions for improving the answer
    Format your response as a JSON object with the following structure:
    {{
        "transcription": "The transcribed text from the image",
        "score": a number between 0 and 10,
        "overall_feedback": "Summary evaluation of the answer",
        "detailed_analysis": "Detailed analysis of the answer content",
        "strengths": ["Strength 1", "Strength 2", ...],
        "areas_to_improve": ["Area 1", "Area 2", ...],
        "suggestions": ["Suggestion 1", "Suggestion 2", ...]
    }}
    """
    try:
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": prompt},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{base64_image}"
                            },
                        },
                    ],
                }
            ],
            response_format={"type": "json_object"},
        )
        # Parse the JSON response (json.loads instead of eval, which is unsafe on model output)
        result = response.choices[0].message.content
        return json.loads(result)
    except Exception as e:
        st.error(f"Error analyzing answer: {str(e)}")
        return None
# Main interface - sidebar
with st.sidebar:
    st.header("Question Setup")
    # Subject selection
    subject = st.selectbox(
        "Select Subject",
        ["Mathematics", "Physics", "Chemistry", "Biology", "History", "Literature", "Computer Science", "Other"]
    )
    # Question input
    question = st.text_area(
        "Enter the question to evaluate",
        height=150,
        help="Type the question that students were asked to answer"
    )
    # Pre-defined question templates
    st.subheader("Question Templates")
    if subject == "Mathematics":
        template_questions = [
            "Explain the concept of differentiation and its applications in real-world problems.",
            "Prove the Pythagorean theorem and discuss its significance in mathematics.",
            "Discuss the properties of logarithmic functions with examples."
        ]
    elif subject == "Physics":
        template_questions = [
            "Explain Newton's laws of motion and give examples of each.",
            "Describe the concept of electromagnetism and its applications.",
            "Explain the theory of relativity and its implications for our understanding of space and time."
        ]
    elif subject == "Chemistry":
        template_questions = [
            "Explain the periodic table's organization and how it relates to electron configuration.",
            "Describe the process of chemical bonding and the different types of bonds.",
            "Explain acid-base reactions and their importance in chemistry."
        ]
    elif subject == "Biology":
        template_questions = [
            "Explain the process of photosynthesis and its importance for life on Earth.",
            "Describe the structure and function of DNA in genetic inheritance.",
            "Explain the theory of evolution and provide evidence supporting it."
        ]
    elif subject == "History":
        template_questions = [
            "Analyze the causes and consequences of World War II.",
            "Discuss the impact of the Industrial Revolution on society and economy.",
            "Explain the significance of the Civil Rights Movement in the United States."
        ]
    elif subject == "Literature":
        template_questions = [
            "Analyze the themes and symbolism in Shakespeare's 'Hamlet'.",
            "Discuss the characteristics of Romanticism in literature with examples.",
            "Compare and contrast the writing styles of two major authors from the 20th century."
        ]
    elif subject == "Computer Science":
        template_questions = [
            "Explain the concept of object-oriented programming and its principles.",
            "Describe the working of sorting algorithms and compare their efficiency.",
            "Explain the concept of artificial intelligence and its current applications."
        ]
    else:
        template_questions = [
            "Explain the concept and provide examples.",
            "Analyze the following topic and provide your insights.",
            "Compare and contrast the following concepts."
        ]
    selected_template = st.selectbox("Select a template question", ["None"] + template_questions)
    if selected_template != "None":
        question = selected_template
        st.session_state.current_question = question
    if st.button("Set Question"):
        st.session_state.current_question = question
        st.success("Question set successfully!")
# Main content area
col1, col2 = st.columns([1, 1])
with col1:
    st.markdown("<h2 class='sub-header'>Upload Answer Sheet</h2>", unsafe_allow_html=True)
    # Display current question
    if st.session_state.current_question:
        st.markdown("### Current Question:")
        st.info(st.session_state.current_question)
    else:
        st.warning("Please set a question in the sidebar first.")
    # File uploader
    uploaded_file = st.file_uploader("Upload a handwritten answer sheet", type=["jpg", "jpeg", "png"])
    if uploaded_file is not None:
        # Display the uploaded image
        image = Image.open(uploaded_file)
        st.session_state.current_image = image
        st.image(image, caption="Uploaded Answer Sheet", use_column_width=True)
        # Evaluate button (single button; creating it twice would raise a duplicate-widget error)
        if st.button("Evaluate Answer"):
            if not st.session_state.current_question:
                st.error("Please set a question before evaluating.")
            else:
                with st.spinner("Analyzing handwritten answer..."):
                    # Process the image and get feedback
                    result = analyze_answer(image, st.session_state.current_question)
                    if result:
                        st.session_state.feedback = result.get('overall_feedback')
                        st.session_state.score = result.get('score')
                        st.session_state.detailed_analysis = result.get('detailed_analysis')
                        st.session_state.strengths = result.get('strengths')
                        st.session_state.areas_to_improve = result.get('areas_to_improve')
                        st.session_state.suggestions = result.get('suggestions')
                        st.session_state.transcription = result.get('transcription')
                        # Add to history
                        st.session_state.evaluation_history.append({
                            'question': st.session_state.current_question,
                            'score': st.session_state.score,
                            'feedback': st.session_state.feedback,
                            'timestamp': time.strftime("%Y-%m-%d %H:%M:%S")
                        })
                        st.success("Evaluation completed!")
with col2:
    st.markdown("<h2 class='sub-header'>Evaluation Results</h2>", unsafe_allow_html=True)
    if st.session_state.feedback:
        # Display transcription
        st.markdown("### Transcription")
        st.markdown(f"<div class='feedback-box'>{st.session_state.transcription}</div>", unsafe_allow_html=True)
        # Display score
        st.markdown("### Score")
        st.markdown(f"<div class='score-container'><div class='score-circle'>{st.session_state.score}/10</div></div>", unsafe_allow_html=True)
        # Display overall feedback
        st.markdown("### Overall Feedback")
        st.markdown(f"<div class='feedback-box'>{st.session_state.feedback}</div>", unsafe_allow_html=True)
        # Detailed analysis, strengths, and areas to improve in expandable sections
        with st.expander("Detailed Analysis", expanded=True):
            st.markdown(st.session_state.detailed_analysis)
        with st.expander("Strengths", expanded=True):
            for strength in st.session_state.strengths:
                st.markdown(f"<div class='metric-card'>✅ {strength}</div>", unsafe_allow_html=True)
        with st.expander("Areas to Improve", expanded=True):
            for area in st.session_state.areas_to_improve:
                st.markdown(f"<div class='metric-card'>🔍 {area}</div>", unsafe_allow_html=True)
        with st.expander("Suggestions for Improvement", expanded=True):
            for suggestion in st.session_state.suggestions:
                st.markdown(f"<div class='metric-card'>💡 {suggestion}</div>", unsafe_allow_html=True)
    else:
        st.info("Upload an answer sheet and click 'Evaluate Answer' to see results here.")
# History section
st.markdown("<h2 class='sub-header'>Evaluation History</h2>", unsafe_allow_html=True)
if st.session_state.evaluation_history:
    history_df = pd.DataFrame(st.session_state.evaluation_history)
    st.dataframe(history_df, use_container_width=True)
    # Create a simple visualization of scores over time
    if len(history_df) > 1:
        st.markdown("### Score Progression")
        fig, ax = plt.subplots(figsize=(10, 4))
        ax.plot(range(len(history_df)), history_df['score'], marker='o', linestyle='-', color='#1E88E5')
        ax.set_xlabel('Evaluation Number')
        ax.set_ylabel('Score')
        ax.set_ylim(0, 10)
        ax.grid(True, linestyle='--', alpha=0.7)
        st.pyplot(fig)
else:
    st.info("Your evaluation history will appear here after you evaluate answers.")
# Footer
st.markdown("---")
st.markdown("<p style='text-align: center;'>© 2023 Handwritten Answer Evaluator | Powered by AI</p>", unsafe_allow_html=True)