import hashlib
import time

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import streamlit as st
from PIL import Image
# Set page configuration
st.set_page_config(
page_title="Brain Disorder ML Analysis",
page_icon="🧠",
layout="wide",
initial_sidebar_state="expanded"
)
# Add custom CSS
st.markdown("""
<style>
.main-header {
font-size: 2.5rem;
color: #4285F4;
text-align: center;
margin-bottom: 2rem;
}
.sub-header {
font-size: 1.5rem;
color: #34A853;
margin-bottom: 1rem;
}
.metric-card {
background-color: #f0f2f6;
border-radius: 10px;
padding: 1rem;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
.model-section {
margin-bottom: 2rem;
padding: 1rem;
border-radius: 10px;
background-color: #f9f9f9;
}
</style>
""", unsafe_allow_html=True)
# Title and Introduction
st.markdown("<h1 class='main-header'>Comparative Analysis of ML Models on Brain Disorder Image Datasets</h1>", unsafe_allow_html=True)
st.markdown("""
This application allows you to analyze brain MRI images using different machine learning models
and compare their performance across various brain disorders including:
- Alzheimer's Disease (No/Very Mild/Mild/Moderate Impairment)
- Brain Tumors (Glioma, Meningioma, No Tumor, Pituitary)
- Parkinson's Disease
""")
# Initialize session state for storing models and results
if 'models' not in st.session_state:
st.session_state.models = {}
if 'results' not in st.session_state:
st.session_state.results = {}
# Create sidebar for model selection
st.sidebar.title("Settings")
# Model selection
available_models = {
"AlexNet": "alexnet",
"ResNet-50": "resnet50",
"VGG-16": "vgg16",
"Inception V3": "inceptionv3"
}
selected_models = st.sidebar.multiselect(
"Select Models to Compare",
list(available_models.keys()),
default=["ResNet-50", "VGG-16"]
)
# Disorder selection
available_disorders = {
"Alzheimer's": {
"classes": ["No Impairment", "Very Mild Impairment", "Mild Impairment", "Moderate Impairment"],
"dataset_size": "5000+ images"
},
"Brain Tumor": {
"classes": ["Glioma", "Meningioma", "No Tumor", "Pituitary"],
"dataset_size": "7023 images"
},
"Parkinson's": {
"classes": ["Parkinson's", "Normal"],
"dataset_size": "830 images"
}
}
selected_disorders = st.sidebar.multiselect(
"Select Disorders to Analyze",
list(available_disorders.keys()),
default=["Brain Tumor"]
)
# Advanced settings
st.sidebar.markdown("---")
st.sidebar.markdown("<h3>Advanced Settings</h3>", unsafe_allow_html=True)
display_metrics = st.sidebar.checkbox("Show Detailed Metrics", value=True)
show_comparison_charts = st.sidebar.checkbox("Show Comparison Charts", value=True)
show_confusion_matrix = st.sidebar.checkbox("Show Confusion Matrix", value=True)
cache_models = st.sidebar.checkbox("Cache Models (Faster Re-analysis)", value=True)
# Function to preprocess the image for different models
def preprocess_image(img, model_name, target_size=(224, 224)):
# Ensure the image is in RGB format
if img.mode != 'RGB':
img = img.convert('RGB')
# Adjust target size for InceptionV3
if model_name == "inceptionv3":
target_size = (299, 299)
# Resize the image
img_resized = img.resize(target_size)
# Convert to array and add batch dimension
img_array = np.array(img_resized)
img_array = np.expand_dims(img_array, axis=0)
# Normalize values to [0, 1]
return img_array / 255.0
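# The helper below is a minimal sketch of how preprocessing could look with real
# pretrained Keras weights, where each architecture expects its own input scaling
# (the plain [0, 1] normalization above is only appropriate for the simulation).
# It assumes TensorFlow/Keras is installed and is not wired into the app; the
# function name and the choice of keras.applications preprocessors are assumptions.
def preprocess_for_pretrained(img, model_name):
    """Sketch: resize and apply the architecture-specific preprocess_input."""
    from tensorflow.keras.applications import inception_v3, resnet50, vgg16  # lazy import

    preprocessors = {
        "resnet50": (resnet50.preprocess_input, (224, 224)),
        "vgg16": (vgg16.preprocess_input, (224, 224)),
        "inceptionv3": (inception_v3.preprocess_input, (299, 299)),
        # AlexNet is not shipped with keras.applications; a PyTorch/torchvision
        # pipeline would be needed for that architecture instead.
    }
    preprocess_fn, target_size = preprocessors[model_name]
    if img.mode != 'RGB':
        img = img.convert('RGB')
    img_array = np.array(img.resize(target_size), dtype=np.float32)
    # preprocess_input expects raw pixel values (0-255), not values scaled to [0, 1]
    return preprocess_fn(np.expand_dims(img_array, axis=0))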
# Function to create a simulated model.
# Caching is handled manually in session state so the "Cache Models" checkbox can
# control re-use; wrapping this in @st.cache_resource would bypass that checkbox.
def get_model(model_name, disorder, num_classes):
model_key = f"{model_name}_{disorder}"
if not cache_models or model_key not in st.session_state.models:
        # We're simulating models here - no actual models are created.
        # In a production app you would load your trained models instead
        # (see the load_trained_model sketch below for one way to do that).
st.session_state.models[model_key] = {
"name": model_name,
"disorder": disorder,
"num_classes": num_classes
}
return st.session_state.models[model_key]
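# A minimal sketch of what get_model could do once real trained models exist, as the
# comment above suggests. It assumes TensorFlow/Keras is installed and that one saved
# classifier per (architecture, disorder) pair is available; the file naming scheme
# "models/<architecture>_<disorder>.h5" is purely an assumption, and the helper is not
# called anywhere in the app.
def load_trained_model(model_name, disorder):
    """Sketch: load a saved Keras classifier for a given architecture/disorder pair."""
    from tensorflow.keras.models import load_model  # lazy import

    slug = disorder.lower().replace(" ", "_").replace("'", "")
    path = f"models/{model_name}_{slug}.h5"  # hypothetical path convention
    return load_model(path)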
# Function to make consistent predictions
def make_prediction(model, preprocessed_img, class_names, disorder, model_name):
    # We'll use a deterministic approach based on the image bytes to get consistent results.
    # In a real scenario you'd call model.predict(preprocessed_img) instead
    # (see the predict_with_real_model sketch below).
    # Hash the image data to get deterministic results; hashlib is used instead of the
    # built-in hash(), which is salted per process and would change across app restarts
    img_hash = int(hashlib.md5(preprocessed_img.tobytes()).hexdigest(), 16) % 10000
    np.random.seed(img_hash)
# Get base accuracy for the model and disorder combination
base_accuracies = {
"resnet50": {"Alzheimer's": 0.89, "Brain Tumor": 0.92, "Parkinson's": 0.86},
"vgg16": {"Alzheimer's": 0.87, "Brain Tumor": 0.90, "Parkinson's": 0.84},
"inceptionv3": {"Alzheimer's": 0.91, "Brain Tumor": 0.94, "Parkinson's": 0.88},
"alexnet": {"Alzheimer's": 0.83, "Brain Tumor": 0.85, "Parkinson's": 0.81}
}
accuracy = base_accuracies.get(model_name, {}).get(disorder, 0.85)
# Generate consistent but plausible predictions
alpha = np.ones(len(class_names))
alpha[np.random.randint(0, len(class_names))] = 5.0 # Bias towards one class
confidences = np.random.dirichlet(alpha)
    # Ensure the top prediction is always the same for the same image
    top_class_idx = img_hash % len(class_names)
    # Swap the largest confidence into the selected class position so that class is
    # always the top prediction, without distorting the rest of the distribution
    current_top_idx = int(np.argmax(confidences))
    confidences[[current_top_idx, top_class_idx]] = confidences[[top_class_idx, current_top_idx]]
    # Re-normalize to guard against floating-point drift
    confidences = confidences / np.sum(confidences)
# Calculate metrics based on the base accuracy
precision = accuracy - 0.02 + (img_hash % 100) / 1000
recall = accuracy - 0.03 + (img_hash % 100) / 1000
f1 = 2 * (precision * recall) / (precision + recall)
# Create a consistent confusion matrix
np.random.seed(img_hash)
cm = create_confusion_matrix(class_names, accuracy, top_class_idx)
return {
"confidences": confidences,
"predicted_class": class_names[top_class_idx],
"accuracy": accuracy,
"precision": precision,
"recall": recall,
"f1_score": f1,
"confusion_matrix": cm
}
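# A minimal sketch of how make_prediction would change with a real trained model: the
# simulated confidences above would be replaced by a single model.predict call. It
# assumes a Keras-style classifier with a softmax output ordered like class_names;
# the helper name is an assumption and it is not called by the app.
def predict_with_real_model(model, preprocessed_img, class_names):
    """Sketch: run real inference and return the top class with its confidences."""
    confidences = model.predict(preprocessed_img)[0]  # shape: (num_classes,)
    top_class_idx = int(np.argmax(confidences))
    return {
        "confidences": confidences,
        "predicted_class": class_names[top_class_idx],
    }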
# Function to create a realistic-looking (simulated) confusion matrix
def create_confusion_matrix(class_names, accuracy, correct_class_idx):
n_classes = len(class_names)
# Create a base matrix with low values
cm = np.random.randint(1, 10, size=(n_classes, n_classes))
# Set the diagonal (correct predictions) to be much higher
for i in range(n_classes):
cm[i, i] = int(50 + (accuracy * 100))
# Ensure the correct class has even higher values
cm[correct_class_idx, correct_class_idx] = int(80 + (accuracy * 100))
return cm
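# The simulated metrics above would normally be computed on a labelled, held-out test
# set. Below is a minimal sketch using scikit-learn; it assumes scikit-learn is
# installed, a Keras-style classifier, and arrays x_test / y_test of preprocessed
# images and integer labels. It is illustrative only and not called by the app.
def evaluate_on_test_set(model, x_test, y_test):
    """Sketch: real accuracy/precision/recall/F1 and confusion matrix."""
    from sklearn.metrics import (  # lazy import
        accuracy_score,
        confusion_matrix,
        precision_recall_fscore_support,
    )

    y_pred = np.argmax(model.predict(x_test), axis=1)
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_test, y_pred, average="macro", zero_division=0
    )
    return {
        "accuracy": accuracy_score(y_test, y_pred),
        "precision": precision,
        "recall": recall,
        "f1_score": f1,
        "confusion_matrix": confusion_matrix(y_test, y_pred),
    }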
# Main content area split into two columns
col1, col2 = st.columns([1, 2])
# File uploader in the first column
with col1:
st.markdown("<h2 class='sub-header'>Upload Brain MRI Image</h2>", unsafe_allow_html=True)
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
if uploaded_file is not None:
image_pil = Image.open(uploaded_file)
st.image(image_pil, caption="Uploaded MRI Image", width=300)
# Second column for results and visualizations
with col2:
if uploaded_file is not None and selected_models and selected_disorders:
st.markdown("<h2 class='sub-header'>Analysis Results</h2>", unsafe_allow_html=True)
# Progress bar for analysis
progress_bar = st.progress(0)
status_text = st.empty()
# Initialize results dictionary
results = {}
# Analyze image with selected models and disorders
total_steps = len(selected_models) * len(selected_disorders)
current_step = 0
for model_name in selected_models:
model_key = available_models[model_name]
results[model_name] = {}
for disorder in selected_disorders:
current_step += 1
progress = current_step / total_steps
progress_bar.progress(progress)
status_text.text(f"Analyzing with {model_name} for {disorder}...")
# Get class names
class_names = available_disorders[disorder]["classes"]
num_classes = len(class_names)
# Get model
model = get_model(model_key, disorder, num_classes)
# Preprocess the image
start_time = time.time()
preprocessed_img = preprocess_image(image_pil, model_key)
# Make prediction
prediction_result = make_prediction(model, preprocessed_img, class_names, disorder, model_key)
prediction_time = time.time() - start_time
# Store all results
results[model_name][disorder] = {
"predicted_class": prediction_result["predicted_class"],
"confidences": prediction_result["confidences"],
"class_names": class_names,
"accuracy": prediction_result["accuracy"],
"precision": prediction_result["precision"],
"recall": prediction_result["recall"],
"f1_score": prediction_result["f1_score"],
"prediction_time": prediction_time,
"confusion_matrix": prediction_result["confusion_matrix"]
}
# Save results to session state
st.session_state.results = results
# Clear progress indicators
progress_bar.empty()
status_text.empty()
# Display results
st.markdown("<h3>Results Summary</h3>", unsafe_allow_html=True)
# Create tabs for each disorder
tabs = st.tabs(selected_disorders)
for i, disorder in enumerate(selected_disorders):
with tabs[i]:
st.write(f"### {disorder} Analysis")
# Create a dataframe for all models' results for this disorder
model_results = []
for model_name in selected_models:
res = results[model_name][disorder]
model_results.append({
"Model": model_name,
"Predicted Class": res["predicted_class"],
"Confidence": f"{np.max(res['confidences']):.2%}",
"Accuracy": f"{res['accuracy']:.2%}",
"Precision": f"{res['precision']:.2%}",
"Recall": f"{res['recall']:.2%}",
"F1 Score": f"{res['f1_score']:.2%}",
"Analysis Time": f"{res['prediction_time']:.4f} sec"
})
# Display results as a table
df_results = pd.DataFrame(model_results)
st.dataframe(df_results)
# If detailed metrics are requested
if display_metrics:
st.write("#### Detailed Class Probabilities")
for model_name in selected_models:
res = results[model_name][disorder]
st.write(f"**{model_name}**")
# Create a dataframe for class probabilities
probs_df = pd.DataFrame({
'Class': res["class_names"],
'Probability': res["confidences"]
})
# Plot bar chart of probabilities
fig, ax = plt.subplots(figsize=(8, 3))
sns.barplot(x='Probability', y='Class', data=probs_df, ax=ax)
ax.set_xlim(0, 1)
ax.set_title(f"{model_name} - {disorder} Class Probabilities")
st.pyplot(fig)
# Show confusion matrices if requested
if show_confusion_matrix:
st.write("#### Confusion Matrices")
for model_name in selected_models:
res = results[model_name][disorder]
st.write(f"**{model_name} Confusion Matrix**")
# Plot confusion matrix
fig, ax = plt.subplots(figsize=(10, 8))
sns.heatmap(
res["confusion_matrix"],
annot=True,
fmt='d',
cmap='Blues',
xticklabels=res["class_names"],
yticklabels=res["class_names"],
ax=ax
)
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
plt.title(f"{model_name} - {disorder} Confusion Matrix")
st.pyplot(fig)
# Calculate and display additional matrix-based metrics
total = np.sum(res["confusion_matrix"])
accuracy_from_cm = np.sum(np.diag(res["confusion_matrix"])) / total
st.write(f"""
**Matrix-based Metrics:**
- Matrix Accuracy: {accuracy_from_cm:.2%}
- Total Samples: {total}
- Correct Predictions: {np.sum(np.diag(res["confusion_matrix"]))}
""")
# Show comparison charts if requested
if show_comparison_charts and len(selected_models) > 1:
st.write("#### Model Comparison")
# Extract metrics for comparison
metrics_df = pd.DataFrame([
{
"Model": model_name,
"Accuracy": results[model_name][disorder]["accuracy"],
"Precision": results[model_name][disorder]["precision"],
"Recall": results[model_name][disorder]["recall"],
"F1 Score": results[model_name][disorder]["f1_score"],
"Analysis Time (sec)": results[model_name][disorder]["prediction_time"]
}
for model_name in selected_models
])
# Create metrics comparison chart
fig, ax = plt.subplots(figsize=(10, 5))
metrics_df_melt = pd.melt(
metrics_df,
id_vars=['Model'],
value_vars=['Accuracy', 'Precision', 'Recall', 'F1 Score'],
var_name='Metric',
value_name='Value'
)
sns.barplot(x='Model', y='Value', hue='Metric', data=metrics_df_melt, ax=ax)
ax.set_title(f"Performance Metrics Comparison - {disorder}")
ax.set_ylim(0, 1)
st.pyplot(fig)
# Analysis time comparison
fig, ax = plt.subplots(figsize=(8, 3))
sns.barplot(x='Analysis Time (sec)', y='Model', data=metrics_df, ax=ax)
ax.set_title(f"Analysis Time Comparison - {disorder}")
st.pyplot(fig)
# Overall comparison if multiple disorders are selected
if len(selected_disorders) > 1:
st.markdown("---")
st.markdown("<h3>Cross-Disorder Comparison</h3>", unsafe_allow_html=True)
for model_name in selected_models:
st.write(f"### {model_name} Performance Across Disorders")
# Create dataframe for cross-disorder comparison
cross_disorder_df = pd.DataFrame([
{
"Disorder": disorder,
"Predicted Class": results[model_name][disorder]["predicted_class"],
"Accuracy": results[model_name][disorder]["accuracy"],
"Analysis Time (sec)": results[model_name][disorder]["prediction_time"]
}
for disorder in selected_disorders
])
st.dataframe(cross_disorder_df)
# Plot accuracy comparison across disorders
fig, ax = plt.subplots(figsize=(8, 4))
sns.barplot(x='Disorder', y='Accuracy', data=cross_disorder_df, ax=ax)
ax.set_title(f"{model_name} - Accuracy Across Disorders")
ax.set_ylim(0, 1)
st.pyplot(fig)
    elif uploaded_file is not None:
st.warning("Please select at least one model and one disorder type to analyze.")
else:
st.info("Please upload an MRI image to begin analysis.")
# Additional information about the application
st.markdown("---")
st.markdown("<h2 class='sub-header'>About This Application</h2>", unsafe_allow_html=True)
st.markdown("""
This application demonstrates a comparative analysis of different machine learning models
for brain disorder detection from MRI images. The current implementation uses simulated,
deterministic prediction logic rather than trained models, so it produces consistent
results for a given image.
**Supported Model Architectures:**
- AlexNet
- ResNet-50
- VGG-16
- Inception V3
**Supported Brain Disorders:**
- Alzheimer's Disease (4 classes)
- Brain Tumors (4 classes)
- Parkinson's Disease (2 classes)
**Metrics Evaluated:**
- Accuracy
- Precision
- Recall
- F1 Score
- Confusion Matrix
- Analysis Time
**How to improve this app:**
1. Train actual models on real brain disorder datasets
2. Implement model saving and loading from files
3. Add explainability features such as Grad-CAM heatmaps to show which regions drive predictions (a minimal sketch is included near the end of this script)
4. Implement user account features to save analysis history
""")
# Footer
st.markdown("---")
st.markdown("""
<div style="text-align: center">
Developed by Aman Singh<br>
Under the Supervision of Mr. Ashis Datta and Dr. Palash Ghosal
</div>
""", unsafe_allow_html=True)
# Streamlit executes this script from top to bottom on every interaction, so no
# explicit entry point is required; this stub is kept only as a conventional main guard.
def run_app():
    pass


if __name__ == "__main__":
    run_app()