1

I'm having issues running a simple example from LangChain:

from dotenv import load_dotenv, find_dotenv

# Load environment variables (e.g. OPENAI_API_KEY) from a .env file
# found anywhere up the directory tree.
load_dotenv(find_dotenv())

# Bug fix: the class is `OpenAI` (PascalCase); `openai` (lowercase) is the
# module, so the original line left the name `OpenAI` undefined below.
from langchain.llms import OpenAI

# temperature=0.9 -> more creative / varied completions.
llm = OpenAI(temperature=0.9)
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))

When I run it, I get the error below. The code was working fine this morning; I'm not sure what happened.

Could you please help?

'''

InvalidRequestError Traceback (most recent call last) Cell In[40], line 2 1 text = "What would be a good company name for a company that makes colorful socks?" ----> 2 print(llm(text))

File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\langchain\llms\base.py:291, in BaseLLM.call(self, prompt, stop, callbacks) 286 def call( 287 self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None 288 ) -> str: 289 """Check Cache and run the LLM on the given prompt and input.""" 290 return ( --> 291 self.generate([prompt], stop=stop, callbacks=callbacks) 292 .generations[0][0] 293 .text 294 )

File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\langchain\llms\base.py:191, in BaseLLM.generate(self, prompts, stop, callbacks) 189 except (KeyboardInterrupt, Exception) as e: 190 run_manager.on_llm_error(e) --> 191 raise e 192 run_manager.on_llm_end(output) 193 return output

File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\langchain\llms\base.py:185, in BaseLLM.generate(self, prompts, stop, callbacks) 180 run_manager = callback_manager.on_llm_start( 181 {"name": self.class.name}, prompts, invocation_params=params 182 ) 183 try: 184 output = ( --> 185 self._generate(prompts, stop=stop, run_manager=run_manager) 186 if new_arg_supported 187 else self._generate(prompts, stop=stop) 188 ) 189 except (KeyboardInterrupt, Exception) as e: 190 run_manager.on_llm_error(e)

File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\langchain\llms\openai.py:315, in BaseOpenAI._generate(self, prompts, stop, run_manager) 313 choices.extend(response["choices"]) 314 else: --> 315 response = completion_with_retry(self, prompt=_prompts, **params) 316 choices.extend(response["choices"]) 317 if not self.streaming: 318 # Can't update token usage if streaming

File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\langchain\llms\openai.py:106, in completion_with_retry(llm, **kwargs) 102 @retry_decorator 103 def _completion_with_retry(**kwargs: Any) -> Any: 104 return llm.client.create(**kwargs) --> 106 return _completion_with_retry(**kwargs)

File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\tenacity_init_.py:289, in BaseRetrying.wraps..wrapped_f(*args, **kw) 287 @functools.wraps(f) 288 def wrapped_f(*args: t.Any, **kw: t.Any) -> t.Any: --> 289 return self(f, *args, **kw)

File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\tenacity_init_.py:379, in Retrying.call(self, fn, *args, **kwargs) 377 retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs) 378 while True: --> 379 do = self.iter(retry_state=retry_state) 380 if isinstance(do, DoAttempt): 381 try:

File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\tenacity_init_.py:314, in BaseRetrying.iter(self, retry_state) 312 is_explicit_retry = fut.failed and isinstance(fut.exception(), TryAgain) 313 if not (is_explicit_retry or self.retry(retry_state)): --> 314 return fut.result() 316 if self.after is not None: 317 self.after(retry_state)

File ~\AppData\Local\Programs\Python\Python39\lib\concurrent\futures_base.py:438, in Future.result(self, timeout) 436 raise CancelledError() 437 elif self._state == FINISHED: --> 438 return self.__get_result() 440 self._condition.wait(timeout) 442 if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:

File ~\AppData\Local\Programs\Python\Python39\lib\concurrent\futures_base.py:390, in Future.__get_result(self) 388 if self._exception: 389 try: --> 390 raise self._exception 391 finally: 392 # Break a reference cycle with the exception in self._exception 393 self = None

File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\tenacity_init_.py:382, in Retrying.call(self, fn, *args, **kwargs) 380 if isinstance(do, DoAttempt): 381 try: --> 382 result = fn(*args, **kwargs) 383 except BaseException: # noqa: B902 384 retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]

File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\langchain\llms\openai.py:104, in completion_with_retry.._completion_with_retry(**kwargs) 102 @retry_decorator 103 def _completion_with_retry(**kwargs: Any) -> Any: --> 104 return llm.client.create(**kwargs)

File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\openai\api_resources\completion.py:25, in Completion.create(cls, *args, **kwargs) 23 while True: 24 try: ---> 25 return super().create(*args, **kwargs) 26 except TryAgain as e: 27 if timeout is not None and time.time() > start + timeout:

File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\openai\api_resources\abstract\engine_api_resource.py:149, in EngineAPIResource.create(cls, api_key, api_base, api_type, request_id, api_version, organization, **params) 127 @classmethod 128 def create( 129 cls, (...) 136 **params, 137 ): 138 ( 139 deployment_id, 140 engine, 141 timeout, 142 stream, 143 headers, 144 request_timeout, 145 typed_api_type, 146 requestor, 147 url, 148 params, --> 149 ) = cls.__prepare_create_request( 150 api_key, api_base, api_type, api_version, organization, **params 151 ) 153 response, _, api_key = requestor.request( 154 "post", 155 url, (...) 160 request_timeout=request_timeout, 161 ) 163 if stream: 164 # must be an iterator

File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\openai\api_resources\abstract\engine_api_resource.py:83, in EngineAPIResource.__prepare_create_request(cls, api_key, api_base, api_type, api_version, organization, **params) 81 if typed_api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD): 82 if deployment_id is None and engine is None: ---> 83 raise error.InvalidRequestError( 84 "Must provide an 'engine' or 'deployment_id' parameter to create a %s" 85 % cls, 86 "engine", 87 ) 88 else: 89 if model is None and engine is None:

InvalidRequestError: Must provide an 'engine' or 'deployment_id' parameter to create a <class 'openai.api_resources.completion.Completion'> '''

Andrey Tagaew
  • 1,573
  • 1
  • 15
  • 20

2 Answers2

1

From the traceback you provided, it looks like you're using an Azure API key:

if typed_api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD): 82 if deployment_id is None and engine is None: ---> 83 raise error.InvalidRequestError.

LangChain has a separate AzureOpenAI class that handles the slightly different setup that comes with using Azure. Note that you need to have your own deployment instance on Azure, which you then reference when initializing your llm instance.

From the docs:

# Use LangChain's Azure-specific LLM wrapper instead of the plain OpenAI class.
from langchain.llms import AzureOpenAI

# Instantiate the wrapper, pointing it at your own Azure deployment.
# Swap in your actual deployment name here.
azure_llm = AzureOpenAI(
    deployment_name="td2",
    model_name="text-davinci-002", 
)

azure_llm("Tell me a joke")

Others who have encountered this issue seem to have found a workaround by using the standard langchain.llms.OpenAI class but then passing in a custom 'engine' parameter (instead of 'model'). But it looks like you don't need to tangle with that now that LC has an official Azure interface.

The OpenAI Github documentation on Azure endpoints may also be of use here.

andrew_reece
  • 20,390
  • 3
  • 33
  • 58
1

The problem was not with the code itself but with the Jupyter notebook where I tried to run it. When I copied it into a simple Python file and ran it via VS Code, everything worked well.

Andrey Tagaew
  • 1,573
  • 1
  • 15
  • 20