I am trying to build a chatbot on my own data using the OpenAI API and LangChain. I have prepared my data in .txt format, and when a user asks a question the bot does respond — but the answer comes back incomplete (cut off mid-sentence). How can I fix this truncation?
I tried changing the model and increasing the token-limit variables, but neither helped. Here is my code:
from flask import Flask
# Flask application object; the route below is registered on it.
app = Flask(__name__)
from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper, ServiceContext
from langchain.chat_models import ChatOpenAI
import os
# SECURITY NOTE(review): never hard-code an API key in source code — load it
# from the environment or a secrets manager, and never commit the real value.
os.environ["OPENAI_API_KEY"] = "MYAPI"
def construct_index(directory_path):
    """Build a vector index over the documents in *directory_path*.

    Loads every file in the directory, embeds the chunks, persists the
    resulting index to ``index.json``, and returns the index object.
    """
    # Token budget for the model's context window.
    max_input_size = 4096
    # Tokens reserved for the generated answer. This value must also be
    # passed to the LLM as max_tokens; otherwise the provider's small
    # default output limit silently truncates answers mid-sentence.
    num_outputs = 2000
    # Maximum token overlap between adjacent chunks.
    max_chunk_overlap = 20
    # Upper bound on the size of each indexed chunk.
    chunk_size_limit = 600
    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap,
                                 chunk_size_limit=chunk_size_limit)
    # BUG FIX: ChatOpenAI wraps the *chat* completions endpoint, but
    # "text-davinci-003" is a legacy *completion* model. With that mismatch
    # the configured max_tokens never takes effect and responses come back
    # cut off — use an actual chat model instead.
    llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5,
                                                model_name="gpt-3.5-turbo",
                                                max_tokens=num_outputs))
    documents = SimpleDirectoryReader(directory_path).load_data()
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor,
                                                   prompt_helper=prompt_helper)
    index = GPTSimpleVectorIndex.from_documents(documents,
                                                service_context=service_context)
    index.save_to_disk('index.json')
    return index
# Runs at import time: rebuilds the index (re-embedding every document and
# spending API credit) on every start of the app — including each restart
# triggered by Flask's debug reloader.
construct_index("context_data/data")
# Module-level cache so the index is deserialized from disk only once,
# not on every incoming HTTP request.
_index_cache = None

def ask_ai(query):
    """Answer *query* against the persisted vector index.

    Loads ``index.json`` lazily on first call and caches it; returns the
    answer text (empty string when the index produces no answer, so the
    Flask view never has to handle ``None``).
    """
    global _index_cache
    if _index_cache is None:
        _index_cache = GPTSimpleVectorIndex.load_from_disk('index.json')
    response = _index_cache.query(query)
    # response.response may be None when no answer was generated.
    return response.response or ""
@app.route('/<question>')
def answer_question(question):
    """Flask view: answer the question taken from the URL path segment.

    NOTE(review): a path segment cannot contain '/' and is URL-decoded by
    Flask, so questions containing slashes will 404 — consider switching
    to a query parameter (e.g. /ask?q=...).
    """
    answer = ask_ai(question)
    # Flask raises if a view returns None; always hand back a string.
    return answer if answer else "Sorry, I could not find an answer."
if __name__ == '__main__':
    # debug=True enables the interactive debugger and auto-reloader —
    # convenient in development, but it must never be used in production.
    app.run(debug=True)