
I am trying to replicate the LlamaIndex demo (here: https://colab.research.google.com/drive/16QMQePkONNlDpgiltOi7oRQgmB8dU5fl?usp=sharing#scrollTo=20cf0152) on my M1 Mac, but I keep hitting this error:

Process finished with exit code 132 (interrupted by signal 4: SIGILL)

I have tried to track down the problem but am struggling. My code is below; any help would be hugely appreciated.
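
From what I can find, exit code 132 / signal 4 (SIGILL) means the process executed an illegal CPU instruction, which makes me suspect the native GPT4All backend rather than the Python code itself. A minimal repro that loads only the GPT4All model (same path and backend as in the full script below, no llama_index involved) would be:

# Minimal check: does the SIGILL happen with the GPT4All model alone?
from langchain.llms import GPT4All

llm = GPT4All(model="./ggml-gpt4all-j-v1.3-groovy.bin", backend='gptj', n_ctx=1024)
print(llm("hello world"))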


from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.llms import GPT4All
from llama_index import load_index_from_storage
from llama_index.node_parser.simple import SimpleNodeParser
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
from llama_index import (
    GPTVectorStoreIndex,
    LangchainEmbedding,
    LLMPredictor,
    ServiceContext,
    StorageContext,
    download_loader,
    PromptHelper
)
from langchain.document_loaders import PyPDFLoader

# Load the PDF and split it into per-page documents
loader = PyPDFLoader("./IPCC_AR6_WGII_Chapter03.pdf")

documents = loader.load_and_split()

# Local GPT4All model (GPT-J backend)
local_llm_path = "./ggml-gpt4all-j-v1.3-groovy.bin"

llm = GPT4All(model=local_llm_path, backend='gptj', streaming=True, n_ctx=1024)
llm_predictor = LLMPredictor(llm=llm)

# Local embeddings via sentence-transformers
embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2"))

prompt_helper = PromptHelper(max_input_size=1024, num_output=256, max_chunk_overlap=-1000)
service_context = ServiceContext.from_defaults(
    llm_predictor=llm_predictor,
    embed_model=embed_model,
    prompt_helper=prompt_helper,
    node_parser=SimpleNodeParser(text_splitter=TokenTextSplitter(chunk_size=512, chunk_overlap=50))
)

# Quick sanity check of the local LLM
print(llm("hello world"))

# Build the index, persist it to disk, then reload it
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
index.storage_context.persist(persist_dir="./storage")
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context, service_context=service_context)

# Query the index with streaming output
query_engine = index.as_query_engine(streaming=True, similarity_top_k=1, service_context=service_context)

response_stream = query_engine.query("What are the main climate risks to our Oceans?")
response_stream.print_response_stream()
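
One thing I have read is that SIGILL on Apple Silicon is often caused by x86_64 native code running under Rosetta 2, which does not emulate the AVX/AVX2 instructions that some prebuilt binaries use. A quick check (standard library only, nothing project-specific) that the interpreter itself is a native arm64 build:

import platform

# Should print 'arm64' on a native Apple Silicon Python;
# 'x86_64' would mean the interpreter and its wheels run under Rosetta 2.
print(platform.machine())
print(platform.python_version())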