# NOTE(review): the two lines below were stray uploader UI text captured in a
# copy-paste ("Drop files here / or click to upload"); commented out so the
# file is valid Python. Safe to delete.
# Drop files here
# or click to upload
import streamlit as st
import requests
import time
import json # Import the json module to handle potential parsing errors
# --- Configuration ---
# The URL of the SnapLogic agent-driver task that backs this chat UI.
BACKEND_URL = "https://emea.snaplogic.com/api/1/rest/slsched/feed/ConnectFasterInc/snapLogic4snapLogic/Bootcamp_EMEA_August_2025/AgentDriver_Swati_Task"
# SECURITY: bearer token hard-coded in source. Move this to st.secrets or an
# environment variable before committing/sharing this file — anyone with the
# file can call the backend as this identity.
TOKEN = "Bearer 5555"

# --- Streamlit App UI ---
st.set_page_config(page_title="SnapLogic Best Practices", layout="centered")
st.title("🤖 SnapLogic Best Practices")
st.markdown("Get detailed answers to RFP questions and technical inquiries, with information sourced from official documentation of SnapLogic.")

# Sample queries shown to guide first-time users toward well-formed questions.
st.markdown("""
**Sample queries:**
- How to design the pipeline?
- What are the SnapLogic Tasks?
- Please recommend Pipeline Naming Conventions
- What are the best practices to design the pipeline?
- Best practices for the Pipeline Execute Snap
""")
# --- Initialize Chat History in Session State ---
# Persist conversation turns across Streamlit reruns; seed the history with a
# single assistant greeting the first time the app runs in this session.
st.session_state.setdefault(
    "messages",
    [{"role": "assistant", "content": "Hello! How can I help you today?"}],
)
# --- Display Existing Chat Messages ---
# Replay every stored turn so the conversation survives Streamlit reruns.
for entry in st.session_state.messages:
    role, text = entry["role"], entry["content"]
    with st.chat_message(role):
        st.markdown(text)
# --- Handle User Input ---
# Runs once per rerun when the user submits a message via the chat input box.
if prompt := st.chat_input("Type your message..."):
    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # --- API Call to Backend ---
    try:
        # Display a "thinking" spinner inside the assistant's message bubble
        # while the (potentially slow) agent pipeline runs.
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                # Headers for the API request, including the bearer token.
                headers = {
                    "Authorization": TOKEN,
                    "Content-Type": "application/json",
                }
                # Generous 120 s timeout: the backend agent can take a while.
                response = requests.post(
                    BACKEND_URL,
                    json={"prompt": prompt},
                    headers=headers,
                    timeout=120,
                )
                # Raise for 4xx/5xx status codes.
                response.raise_for_status()
                response_data = response.json()

                # The backend may answer with a list of records or a single
                # object; extract the 'response' field in either case, and
                # fall back to stringifying anything unexpected.
                if isinstance(response_data, list) and response_data and isinstance(response_data[0], dict):
                    ai_response = response_data[0].get("response", "No response found.")
                elif isinstance(response_data, dict):
                    ai_response = response_data.get("response", "No response found.")
                else:
                    ai_response = str(response_data)

                st.session_state.messages.append({"role": "assistant", "content": ai_response})
                st.markdown(ai_response)
    # BUG FIX: this handler must come BEFORE RequestException. In requests
    # >= 2.27, response.json() raises requests.exceptions.JSONDecodeError,
    # which subclasses BOTH json.JSONDecodeError and RequestException, so
    # with RequestException listed first a decode failure was misreported as
    # a connection error and this branch was unreachable.
    except json.JSONDecodeError as e:
        # The backend replied, but not with valid JSON.
        error_message = f"Error: Could not decode JSON response from the backend. Details: {e}"
        st.session_state.messages.append({"role": "assistant", "content": error_message})
        with st.chat_message("assistant"):
            st.error(error_message)
    except requests.exceptions.RequestException as e:
        # Connection errors, timeouts, and bad HTTP status codes.
        error_message = f"Error: Failed to connect to the backend. Please check the URL and your connection. Details: {e}"
        st.session_state.messages.append({"role": "assistant", "content": error_message})
        with st.chat_message("assistant"):
            st.error(error_message)
    # Rerun so the full history (including the new turns) renders cleanly.
    st.rerun()
# NOTE(review): stray chat-transcript text captured in a copy-paste; commented
# out so the file is valid Python. Safe to delete.
# Hi! I can help you with any questions about Streamlit and Python. What would you like to know?