I'm debugging the code below and I get an ImportError:
```
File "/Users/janiobonfim/Projetos-VSCode/main.py", line 9, in <module>
    from llama_index import GPTVectorStoreIndex, download_loader, MockLLMPredictor, ServiceContext
ImportError: cannot import name 'FunctionMessage' from 'langchain.schema' (/Users/janiobonfim/Projetos-VSCode/env/lib/python3.11/site-packages/langchain/schema.py)
```
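If I read the traceback right, the failure happens during the llama_index import itself: somewhere inside it there is a `from langchain.schema import FunctionMessage`, and that name does not exist in my installed langchain. So this looks like a version mismatch between the two libraries rather than a bug in my own code. A quick probe to confirm the name really is missing (I don't know in which langchain release it was introduced):

```python
# Check whether the installed langchain exposes the name llama_index expects.
import langchain.schema

# Should print False here, consistent with the ImportError above.
print(hasattr(langchain.schema, "FunctionMessage"))
```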
I already updated the versions of llama-index and LangChain, but the error persists. The code below fetches a text from a Google Doc, indexes it with llama-index, and submits questions about it to OpenAI's ChatGPT. To double-check which versions are actually active in the virtualenv, I read them with importlib.metadata from the standard library (available since Python 3.8, so fine on my 3.11):
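```python
# Print the installed distribution versions, as pip sees them in this venv.
from importlib.metadata import version

print("llama-index:", version("llama-index"))
print("langchain:", version("langchain"))
```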
Can anyone help me identify where I'm going wrong? Thank you very much.
Jan
```python
import os
import pickle

from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
# line 9 of main.py, where the traceback points:
from llama_index import GPTVectorStoreIndex, download_loader, MockLLMPredictor, ServiceContext

os.environ['OPENAI_API_KEY'] = '...'

# Mock predictor so I can inspect token usage without spending real tokens.
llm_predictor = MockLLMPredictor(max_tokens=256)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)


def authorize_gdocs():
    """Obtain (or refresh) Google Docs credentials, caching them in token.pickle."""
    cred = None
    if os.path.exists("token.pickle"):
        with open("token.pickle", 'rb') as token:
            cred = pickle.load(token)

    if not cred or not cred.valid:
        if cred and cred.expired and cred.refresh_token:
            cred.refresh(Request())
        else:
            print(os.getcwd())
            creds_file_path = os.path.abspath('credentials.json')
            google_oauth2_scopes = [
                "https://www.googleapis.com/auth/documents.readonly"
            ]
            flow = InstalledAppFlow.from_client_secrets_file(creds_file_path, google_oauth2_scopes)
            cred = flow.run_local_server(port=0)
        with open("token.pickle", 'wb') as token:
            pickle.dump(cred, token)


if __name__ == '__main__':
    authorize_gdocs()

    # Load the Google Doc(s) and build a vector index over them.
    GoogleDocsReader = download_loader('GoogleDocsReader')
    gdoc_ids = ['...']
    loader = GoogleDocsReader()
    documents = loader.load_data(document_ids=gdoc_ids)
    index = GPTVectorStoreIndex.from_documents(documents)

    while True:
        prompt = input("Type prompt...")
        query_engine = index.as_query_engine()
        response = query_engine.query(prompt)
        print(response)

        # Second engine, backed by the mock predictor (see my note after the code).
        query_engine = index.as_query_engine(
            service_context=service_context
        )
        # get number of tokens used
        print(llm_predictor.last_token_usage)
```
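One thing I noticed while re-reading the loop: the second query_engine, the one built with the mock service_context, is never actually queried, so last_token_usage is probably stale when it is printed. My assumption (untested, since the ImportError blocks me) is that the mock engine has to run the query before the predictor records a count, roughly like this:

```python
# Run the prompt through the mock-backed engine so MockLLMPredictor
# records a token count, then read it back. (Assumes the ImportError
# above is already resolved; untested in my environment.)
mock_engine = index.as_query_engine(service_context=service_context)
mock_engine.query(prompt)
print(llm_predictor.last_token_usage)
```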