# The imports below are what this snippet needs; router = APIRouter(), plus
# agent, agent_handler, messenger, the Redis client r, and expire_chat_history
# are defined elsewhere in the app.
from fastapi import APIRouter, Request
from langchain.memory import ConversationBufferMemory, RedisChatMessageHistory


@router.post("/webhook")
async def webhook_events(request: Request):
    payload = await request.json()

    message = ""
    value = payload["entry"][0]["changes"][0]["value"]

    if "contacts" in value:
        user_name = value["contacts"][0]["profile"]["name"]
        wa_id = value["contacts"][0]["wa_id"]
        user_id = user_name.split(" ")[0] + "@" + wa_id

    if "messages" in payload["entry"][0]["changes"][0]["value"]:
        reciepient = payload["entry"][0]["changes"][0]["value"]["messages"][0]["from"]
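        # Per-user history lives in Redis; ConversationBufferMemory replays it
        # to the agent on every turn.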
        chat_history = RedisChatMessageHistory(user_id)
        memory = ConversationBufferMemory(
            chat_memory=chat_history,
            memory_key="chat_history",
            return_messages=True)
        if not chat_history.messages:
            chat_history.add_ai_message("Hi! What's your name? I'm MobSecBot, designed to help you with your mobile and security questions.")
            chat_history.add_user_message(f"Hi! My name is {user_name}.")
            expire_chat_history(r, f'message_store:{user_id}')

        agent_chain = agent_handler.init_chain(agent, memory)
        if "text" in payload["entry"][0]["changes"][0]["value"]["messages"][0]:
            message = payload["entry"][0]["changes"][0]["value"]["messages"][0]["text"]["body"]
            
            print(f"{reciepient}: {message}")
            try:
                response = agent_chain.run(message)
            except Exception as e:
                # Fall back to sending the error text so the user still gets a reply.
                response = str(e)
            print(f"MobSecBot: {response}")
        else:
            response = "Sorry! I can only respond to text messages for now."
        await messenger.send_message(response, recipient)

I am developing a WhatsApp chatbot using LangChain, FastAPI, and Meta webhooks. I am getting multiple POST requests to the webhook for a single question, so the AgentExecutor chain is invoked several times. I have attached images for reference below.

  • Please clarify your specific problem or provide additional details to highlight exactly what you need. As it's currently written, it's hard to tell exactly what you're asking. – Community Aug 29 '23 at 01:27

1 Answer


'Some LLMs provide a streaming response. This means that instead of waiting for the entire response to be returned, you can start processing it as soon as it's available. This is useful if you want to display the response to the user as it's being generated, or if you want to process the response as it's being generated.'
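For illustration, a minimal sketch of enabling streaming in LangChain; ChatOpenAI, the stdout callback handler, and the prompt are assumptions here, not part of the question's code:

from langchain.chat_models import ChatOpenAI
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# streaming=True makes the model emit tokens as they are generated; the
# callback prints each token to stdout as it arrives. Requires OPENAI_API_KEY.
llm = ChatOpenAI(
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],
    temperature=0,
)
response = llm.predict("Explain webhooks in one sentence.")

Each token is printed as it is generated instead of after the full completion is returned.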

Josh P