streamlit_chat_app.py
· 1.9 KiB · Python
Plain text
import streamlit as st
import random
import time

"""Minimal Streamlit chat demo.

Renders a chat UI with persistent history (via st.session_state) and a fake
"assistant" that replies with a canned response, streamed word-by-word to
simulate an LLM. No real model is called.
"""

st.write("Streamlit loves LLMs! 🤖 [Build your own chat app](https://docs.streamlit.io/develop/tutorials/llms/build-conversational-apps) in minutes, then make it powerful by adding images, dataframes, or even input widgets to the chat.")

st.caption("Note that this demo app isn't actually connected to any LLMs. Those are expensive ;)")

# Initialize chat history once per session; Streamlit reruns the whole script
# on every interaction, so session_state is the only place history survives.
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "Let's start chatting! 👇"}]

# Replay the full chat history on every rerun so prior turns stay visible.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input (walrus: prompt is None until the user submits text).
if prompt := st.chat_input("What is up?"):
    # Record and display the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Display the assistant's (canned) response in its own chat bubble.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        assistant_response = random.choice(
            [
                "Hello there! How can I assist you today?",
                "Hi, human! Is there anything I can help you with?",
                "Do you need help?",
            ]
        )
        # Simulate a token stream: reveal one word at a time with a short
        # delay and a blinking-cursor suffix.
        for chunk in assistant_response.split():
            full_response += chunk + " "
            time.sleep(0.05)
            message_placeholder.markdown(full_response + "▌")
        # Drop the trailing space left by the join-by-hand loop so the
        # rendered and stored message match the original response text.
        full_response = full_response.rstrip()
        message_placeholder.markdown(full_response)

    # Persist the assistant's reply so it survives the next rerun.
    st.session_state.messages.append({"role": "assistant", "content": full_response})
| 1 | import streamlit as st |
| 2 | import random |
| 3 | import time |
| 4 | |
| 5 | st.write("Streamlit loves LLMs! 🤖 [Build your own chat app](https://docs.streamlit.io/develop/tutorials/llms/build-conversational-apps) in minutes, then make it powerful by adding images, dataframes, or even input widgets to the chat.") |
| 6 | |
| 7 | st.caption("Note that this demo app isn't actually connected to any LLMs. Those are expensive ;)") |
| 8 | |
| 9 | # Initialize chat history |
| 10 | if "messages" not in st.session_state: |
| 11 | st.session_state.messages = [{"role": "assistant", "content": "Let's start chatting! 👇"}] |
| 12 | |
| 13 | # Display chat messages from history on app rerun |
| 14 | for message in st.session_state.messages: |
| 15 | with st.chat_message(message["role"]): |
| 16 | st.markdown(message["content"]) |
| 17 | |
| 18 | # Accept user input |
| 19 | if prompt := st.chat_input("What is up?"): |
| 20 | # Add user message to chat history |
| 21 | st.session_state.messages.append({"role": "user", "content": prompt}) |
| 22 | # Display user message in chat message container |
| 23 | with st.chat_message("user"): |
| 24 | st.markdown(prompt) |
| 25 | |
| 26 | # Display assistant response in chat message container |
| 27 | with st.chat_message("assistant"): |
| 28 | message_placeholder = st.empty() |
| 29 | full_response = "" |
| 30 | assistant_response = random.choice( |
| 31 | [ |
| 32 | "Hello there! How can I assist you today?", |
| 33 | "Hi, human! Is there anything I can help you with?", |
| 34 | "Do you need help?", |
| 35 | ] |
| 36 | ) |
| 37 | # Simulate stream of response with milliseconds delay |
| 38 | for chunk in assistant_response.split(): |
| 39 | full_response += chunk + " " |
| 40 | time.sleep(0.05) |
| 41 | # Add a blinking cursor to simulate typing |
| 42 | message_placeholder.markdown(full_response + "▌") |
| 43 | message_placeholder.markdown(full_response) |
| 44 | # Add assistant response to chat history |
| 45 | st.session_state.messages.append({"role": "assistant", "content": full_response}) |
| 46 |