import asyncio

import streamlit as st
from llama_index.tools.mcp import BasicMCPClient

# MCP server that exposes the document-query tool; assumes the companion
# server is already running locally on port 8000.
MCP_CLIENT = BasicMCPClient(command_or_url="http://localhost:8000/mcp")


async def chat(inpt: str):
    # Ask the server's query tool about the document and return the text
    # of the first content block in the result.
    result = await MCP_CLIENT.call_tool(
        tool_name="query_index_tool", arguments={"question": inpt}
    )
    return result.content[0].text
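

# Streamlit scripts run synchronously, so the async MCP call is driven
# to completion with asyncio.run() on each invocation.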
def sync_chat(inpt: str):
    return asyncio.run(chat(inpt))


# Chat Interface
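# st.set_page_config must be the first Streamlit call on the page.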
st.set_page_config(page_title="NotebookLlaMa - Document Chat", page_icon="🗣")

st.sidebar.header("Document Chat🗣")
st.sidebar.info("To switch to the Home page, select it from above!🔺")
st.markdown("---")
st.markdown("## NotebookLlaMa - Document Chat🗣")

# Initialize chat history
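# st.session_state persists across script reruns, so the history survives.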
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
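# (Streamlit re-executes the whole script on every interaction.)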
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        if message["role"] == "assistant" and "sources" in message:
            # Display the main response
            st.markdown(message["content"])
            # Add toggle for sources
            with st.expander("Sources"):
                st.markdown(message["sources"])
        else:
            st.markdown(message["content"])

# React to user input
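# st.chat_input returns None until the user submits a message.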
if prompt := st.chat_input("Ask a question about your document"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Get bot response
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            try:
                response = sync_chat(prompt)

                # Split out a "## Sources" section if the tool appended one;
                # adjust this if your server's response format differs.
                if "## Sources" in response:
                    parts = response.split("## Sources", 1)
                    main_response = parts[0].strip()
                    sources = "## Sources\n\n" + parts[1].strip()
                else:
                    main_response = response
                    sources = None

                st.markdown(main_response)

                # Add toggle for sources if they exist
                if sources:
                    with st.expander("Sources"):
                        st.markdown(sources)
                    # Add to history with sources
                    st.session_state.messages.append(
                        {
                            "role": "assistant",
                            "content": main_response,
                            "sources": sources,
                        }
                    )
                else:
                    # Add to history without sources
                    st.session_state.messages.append(
                        {"role": "assistant", "content": main_response}
                    )
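            # Surface connection or tool errors in the chat instead of
            # crashing the page.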
            except Exception as e:
                error_msg = f"Error: {e}"
                st.markdown(error_msg)
                st.session_state.messages.append(
                    {"role": "assistant", "content": error_msg}
                )