My project is to build a natural-language search engine. I don't use an eGPU or an M1/M2.

Here is part of my code:

import os
from typing import Any, List

# from llm import CustomLLM
from langchain.chains import RetrievalQA
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import LlamaCpp
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import DeepLake

HF_KEY = ...
os.environ["HUGGINGFACEHUB_API_TOKEN"] = HF_KEY

class SearchEngine:
    def __init__(self, verbose: bool = False):
        # Bare annotations only: these attributes are deliberately left
        # unset so that __getattr__ below builds them lazily on first access.
        self.llm: Any
        self.db: Any
        self.embedding: Any
        self.qa: Any

        self.verbose: bool = verbose
    
    def _load_llm(self):
        self.llm = LlamaCpp(
            model_path="./llama-2-7b-chat.ggmlv3.q4_0.bin",
            # input={"temperature": 0.2, "max_length": 300, "top_p": 1},
            verbose=True,
        )
    
    def _load_db(self):
        self.db = DeepLake(
            "./deeplake-dataset",
            embedding=self.embedding,
            verbose=True,
        )
    
    def _load_embedding(self):
        self.embedding = HuggingFaceEmbeddings()

    def _load_qa(self):
        self.qa = RetrievalQA.from_chain_type(
            self.llm,
            chain_type="stuff",
            retriever=self.retriever,
            return_source_documents=True,
        )

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails, i.e. on the first
        # access to a not-yet-loaded attribute: load it, then return it.
        lazy_attr = {
            'llm': self._load_llm,
            'db': self._load_db,
            'embedding': self._load_embedding,
            'qa': self._load_qa,
        }

        if name in lazy_attr:
            lazy_attr[name]()
            return getattr(self, name)
        return super().__getattribute__(name)

    @property
    def retriever(self):
        return self.db.as_retriever()

    def feed(self, pdf_files: List[str]):
        ...

    def search(self, query: str, natural_response_attended: bool = False):
        if natural_response_attended:
            return self.qa({"query": query})
        else:
            return self.db.similarity_search(query, k=4, distance_metric="cos", verbose=self.verbose)

The case with qa takes too much time:

llama.cpp: loading model from ./llama-2-7b-chat.ggmlv3.q4_0.bin
llama_model_load_internal: format     = ggjt v3 (latest)
llama_model_load_internal: n_vocab    = 32000
llama_model_load_internal: n_ctx      = 512
llama_model_load_internal: n_embd     = 4096
llama_model_load_internal: n_mult     = 256
llama_model_load_internal: n_head     = 32
llama_model_load_internal: n_head_kv  = 32
llama_model_load_internal: n_layer    = 32
llama_model_load_internal: n_rot      = 128
llama_model_load_internal: n_gqa      = 1
llama_model_load_internal: rnorm_eps  = 1.0e-06
llama_model_load_internal: n_ff       = 11008
llama_model_load_internal: freq_base  = 10000.0
llama_model_load_internal: freq_scale = 1
llama_model_load_internal: ftype      = 2 (mostly Q4_0)
llama_model_load_internal: model size = 7B
llama_model_load_internal: ggml ctx size =    0.08 MB
llama_model_load_internal: mem required  = 3917.73 MB (+  256.00 MB per state)
llama_new_context_with_model: kv self size  =  256.00 MB
AVX = 1 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 | 

llama_print_timings:        load time =  2642.39 ms
llama_print_timings:      sample time =    86.19 ms /   112 runs   (    0.77 ms per token,  1299.44 tokens per second)
llama_print_timings: prompt eval time = 91584.58 ms /   252 tokens (  363.43 ms per token,     2.75 tokens per second)
llama_print_timings:        eval time = 249264.83 ms /   111 runs   ( 2245.63 ms per token,     0.45 tokens per second)
llama_print_timings:       total time = 342113.65 ms

I had previously used LLaMA (not Llama 2) and it was faster (around 30 seconds). Are there optimizations that could really speed up my program, like maybe using the integrated GPU, using another LLM from Hugging Face, a custom LLM, changing the prompt, or something else?
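For what it's worth, here is the kind of change I had in mind on the CPU side, using the tuning parameters exposed by langchain's LlamaCpp wrapper (n_threads, n_batch, n_ctx, max_tokens). The values are untested guesses for my machine, and n_gpu_layers would only help with a GPU/BLAS-enabled llama.cpp build, which my log shows I don't have (BLAS = 0):

import multiprocessing

from langchain.llms import LlamaCpp

# Untested sketch: knobs exposed by langchain's LlamaCpp wrapper.
# All values below are guesses, not measured recommendations.
llm = LlamaCpp(
    model_path="./llama-2-7b-chat.ggmlv3.q4_0.bin",
    n_threads=max(1, multiprocessing.cpu_count() - 1),  # leave one core free
    n_batch=512,      # bigger batches speed up prompt eval at the cost of RAM
    n_ctx=2048,       # room for the stuffed documents plus the question
    max_tokens=128,   # cap generation length; eval time dominates my timings
    temperature=0.2,
    verbose=True,
)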

Or any advice for getting decent performance from an LLM at low cost?
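Since prompt eval alone took ~91 s for 252 tokens, one idea I had for "changing the prompt" is simply making it shorter: retrieve fewer chunks for the "stuff" chain, and index smaller chunks in feed(). An untested sketch against my own class (the k and chunk_size values are guesses):

    def _load_qa(self):
        # Fewer retrieved documents -> shorter stuffed prompt -> less prompt eval.
        retriever = self.db.as_retriever(search_kwargs={"k": 2})  # down from 4
        self.qa = RetrievalQA.from_chain_type(
            self.llm,
            chain_type="stuff",
            retriever=retriever,
            return_source_documents=True,
        )

In feed(), smaller chunks at indexing time would also keep the final prompt short:

    splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)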
