请稍等 ...
×

采纳答案成功!

向帮助你的同学说点啥吧!感谢那些助人为乐的人

一直提示无效的令牌

代码如下:

# Minimal LangChain + OpenAI smoke test.
# NOTE: the original paste used Unicode "smart quotes" (“ ”) around every
# string literal and keyword, which is a SyntaxError in Python — all quotes
# below are plain ASCII. This kind of quote mangling typically comes from
# copying code out of a rich-text page.
from langchain.llms import OpenAI
import os

# Configure the key and the proxy base URL via environment variables.
os.environ["OPENAI_KEY"] = "sk-proj-xxx(不方便透露)"
os.environ["OPENAI_API_BASE"] = "https://ai-yyds.com/v1"

openai_api_key = os.getenv("OPENAI_KEY")
openai_api_base = os.getenv("OPENAI_API_BASE")
# Echo the values so a wrong/empty key or base URL is visible before the call.
print("OPENAI_API_KEY:", openai_api_key)
print("OPENAI_PROXY:", openai_api_base)

api_base = os.getenv("OPENAI_API_BASE")
api_key = os.getenv("OPENAI_KEY")

# Pass the credentials explicitly so the client does not fall back to the
# default OPENAI_API_KEY environment variable.
llm = OpenAI(
    model="gpt-3.5-turbo-16k",
    temperature=0,
    openai_api_key=api_key,
    openai_api_base=api_base
)
llm.predict("介绍下你自己")

错误如下:


AuthenticationError Traceback (most recent call last)
Cell In[6], line 21
14 api_key = os.getenv(“OPENAI_KEY”)
15 llm = OpenAI(
16 model=“gpt-3.5-turbo”,
17 temperature=0,
18 openai_api_key=api_key,
19 openai_api_base=api_base
20 )
—> 21 llm.predict(“介绍下你自己”)

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/langchain/llms/base.py:865, in BaseLLM.predict(self, text, stop, **kwargs)
863 else:
864 _stop = list(stop)
–> 865 return self(text, stop=_stop, **kwargs)

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/langchain/llms/base.py:825, in BaseLLM.call(self, prompt, stop, callbacks, tags, metadata, **kwargs)
818 if not isinstance(prompt, str):
819 raise ValueError(
820 "Argument prompt is expected to be a string. Instead found “
821 f”{type(prompt)}. If you want to run the LLM on multiple prompts, use "
822 "generate instead."
823 )
824 return (
–> 825 self.generate(
826 [prompt],
827 stop=stop,
828 callbacks=callbacks,
829 tags=tags,
830 metadata=metadata,
831 **kwargs,
832 )
833 .generations[0][0]
834 .text
835 )

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/langchain/llms/base.py:621, in BaseLLM.generate(self, prompts, stop, callbacks, tags, metadata, **kwargs)
612 raise ValueError(
613 "Asked to cache, but no cache found at langchain.cache."
614 )
615 run_managers = [
616 callback_manager.on_llm_start(
617 dumpd(self), [prompt], invocation_params=params, options=options
618 )[0]
619 for callback_manager, prompt in zip(callback_managers, prompts)
620 ]
–> 621 output = self._generate_helper(
622 prompts, stop, run_managers, bool(new_arg_supported), **kwargs
623 )
624 return output
625 if len(missing_prompts) > 0:

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/langchain/llms/base.py:523, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs)
521 for run_manager in run_managers:
522 run_manager.on_llm_error(e)
–> 523 raise e
524 flattened_outputs = output.flatten()
525 for manager, flattened_output in zip(run_managers, flattened_outputs):

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/langchain/llms/base.py:510, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs)
500 def _generate_helper(
501 self,
502 prompts: List[str],
(…)
506 **kwargs: Any,
507 ) -> LLMResult:
508 try:
509 output = (
–> 510 self._generate(
511 prompts,
512 stop=stop,
513 # TODO: support multiple run managers
514 run_manager=run_managers[0] if run_managers else None,
515 **kwargs,
516 )
517 if new_arg_supported
518 else self._generate(prompts, stop=stop)
519 )
520 except (KeyboardInterrupt, Exception) as e:
521 for run_manager in run_managers:

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/langchain/llms/openai.py:385, in BaseOpenAI._generate(self, prompts, stop, run_manager, **kwargs)
373 choices.append(
374 {
375 “text”: generation.text,
(…)
382 }
383 )
384 else:
–> 385 response = completion_with_retry(
386 self, prompt=_prompts, run_manager=run_manager, **params
387 )
388 choices.extend(response[“choices”])
389 update_token_usage(_keys, response, token_usage)

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/langchain/llms/openai.py:115, in completion_with_retry(llm, run_manager, **kwargs)
111 @retry_decorator
112 def _completion_with_retry(**kwargs: Any) -> Any:
113 return llm.client.create(**kwargs)
–> 115 return _completion_with_retry(**kwargs)

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/tenacity/init.py:289, in BaseRetrying.wraps..wrapped_f(*args, **kw)
287 @functools.wraps(f)
288 def wrapped_f(*args: t.Any, **kw: t.Any) -> t.Any:
–> 289 return self(f, *args, **kw)

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/tenacity/init.py:379, in Retrying.call(self, fn, *args, **kwargs)
377 retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
378 while True:
–> 379 do = self.iter(retry_state=retry_state)
380 if isinstance(do, DoAttempt):
381 try:

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/tenacity/init.py:314, in BaseRetrying.iter(self, retry_state)
312 is_explicit_retry = fut.failed and isinstance(fut.exception(), TryAgain)
313 if not (is_explicit_retry or self.retry(retry_state)):
–> 314 return fut.result()
316 if self.after is not None:
317 self.after(retry_state)

File ~/anaconda3/envs/wen_agent/lib/python3.11/concurrent/futures/_base.py:449, in Future.result(self, timeout)
447 raise CancelledError()
448 elif self._state == FINISHED:
–> 449 return self.__get_result()
451 self._condition.wait(timeout)
453 if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:

File ~/anaconda3/envs/wen_agent/lib/python3.11/concurrent/futures/_base.py:401, in Future.__get_result(self)
399 if self._exception:
400 try:
–> 401 raise self._exception
402 finally:
403 # Break a reference cycle with the exception in self._exception
404 self = None

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/tenacity/init.py:382, in Retrying.call(self, fn, *args, **kwargs)
380 if isinstance(do, DoAttempt):
381 try:
–> 382 result = fn(*args, **kwargs)
383 except BaseException: # noqa: B902
384 retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/langchain/llms/openai.py:113, in completion_with_retry.._completion_with_retry(**kwargs)
111 @retry_decorator
112 def _completion_with_retry(**kwargs: Any) -> Any:
–> 113 return llm.client.create(**kwargs)

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/openai/api_resources/completion.py:25, in Completion.create(cls, *args, **kwargs)
23 while True:
24 try:
—> 25 return super().create(*args, **kwargs)
26 except TryAgain as e:
27 if timeout is not None and time.time() > start + timeout:

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py:153, in EngineAPIResource.create(cls, api_key, api_base, api_type, request_id, api_version, organization, **params)
127 @classmethod
128 def create(
129 cls,
(…)
136 **params,
137 ):
138 (
139 deployment_id,
140 engine,
(…)
150 api_key, api_base, api_type, api_version, organization, **params
151 )
–> 153 response, _, api_key = requestor.request(
154 “post”,
155 url,
156 params=params,
157 headers=headers,
158 stream=stream,
159 request_id=request_id,
160 request_timeout=request_timeout,
161 )
163 if stream:
164 # must be an iterator
165 assert not isinstance(response, OpenAIResponse)

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/openai/api_requestor.py:298, in APIRequestor.request(self, method, url, params, headers, files, stream, request_id, request_timeout)
277 def request(
278 self,
279 method,
(…)
286 request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
287 ) -> Tuple[Union[OpenAIResponse, Iterator[OpenAIResponse]], bool, str]:
288 result = self.request_raw(
289 method.lower(),
290 url,
(…)
296 request_timeout=request_timeout,
297 )
–> 298 resp, got_stream = self._interpret_response(result, stream)
299 return resp, got_stream, self.api_key

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/openai/api_requestor.py:700, in APIRequestor._interpret_response(self, result, stream)
692 return (
693 self._interpret_response_line(
694 line, result.status_code, result.headers, stream=True
695 )
696 for line in parse_stream(result.iter_lines())
697 ), True
698 else:
699 return (
–> 700 self._interpret_response_line(
701 result.content.decode(“utf-8”),
702 result.status_code,
703 result.headers,
704 stream=False,
705 ),
706 False,
707 )

File ~/anaconda3/envs/wen_agent/lib/python3.11/site-packages/openai/api_requestor.py:763, in APIRequestor._interpret_response_line(self, rbody, rcode, rheaders, stream)
761 stream_error = stream and “error” in resp.data
762 if stream_error or not 200 <= rcode < 300:
–> 763 raise self.handle_error_response(
764 rbody, rcode, resp.data, rheaders, stream_error=stream_error
765 )
766 return resp

AuthenticationError: 无效的令牌 (request id: 2024050909440670103808807286812)

我同事拿到这个key去一个开源的gpt3.5项目一配置就能用,我运行示例代码就一直报错是什么原因?

正在回答 回答被采纳积分+3

1回答

tomiezhang 2024-05-10 11:12:12

确定下你的版本情况,确保你目前的langchain和openai版本为:
langchain==0.0.279

openai==0.27.8
如果还不行,尝试换成Chatmodel

# gpt-3.5-turbo-* are chat-completion models, so use the chat model class
# instead of the legacy completions-based `OpenAI` LLM wrapper.
from langchain.chat_models import ChatOpenAI

# `api_key` / `api_base` are assumed to be defined earlier (read from the
# environment) — this snippet only shows the instantiation.
# NOTE: ChatOpenAI takes the model via `model_name`.
llm = ChatOpenAI(
    model_name='gpt-3.5-turbo-16k',
    temperature=0,
    openai_api_key=api_key,
    openai_api_base=api_base
 )

如果都搞不定,可以看下一小节的在线笔记使用,用在线笔记来体验!

1 回复 有任何疑惑可以回复我~
  • 提问者 慕娘6279593 #1
    我已经搞清楚问题了,首先环境要非常的明确,其次base_url 一定要准确才行,否则一个不对的话就报这个错误,感觉官方API也不完善应该还是用的人还不够多,否则不会什么错误都提示这个AuthenticationError: 无效的令牌···
    回复 有任何疑惑可以回复我~ 2024-05-13 20:34:06
问题已解决,确定采纳
还有疑问,暂不采纳
意见反馈 帮助中心 APP下载
官方微信