Problem calling the OpenAI API

My code:

# Read the relay endpoint and API key from environment variables
from langchain.llms import OpenAI
import os

api_base = os.getenv("OPENAI_API_BASE")
api_key = os.getenv("OPENAI_KEY")

# Completion-style model, pointed at the relay endpoint
llm = OpenAI(
    model="gpt-3.5-turbo-instruct",
    temperature=0,
    openai_api_key=api_key,
    openai_api_base=api_base,
)
llm.predict("介绍下你自己")

Error message:

{
	"name": "APIConnectionError",
	"message": "Error communicating with OpenAI: HTTPSConnectionPool(host='ai-yyds.com', port=443): Max retries exceeded with url: /v1/completions (Caused by ProxyError('Unable to connect to proxy', OSError('Tunnel connection failed: 400 Bad Request')))",
	"stack": "---------------------------------------------------------------------------
OSError                                   Traceback (most recent call last)
File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/urllib3/connectionpool.py:779, in HTTPConnectionPool.urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, preload_content, decode_content, **response_kw)
    778 try:
--> 779     self._prepare_proxy(conn)
    780 except (BaseSSLError, OSError, SocketTimeout) as e:

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/urllib3/connectionpool.py:1048, in HTTPSConnectionPool._prepare_proxy(self, conn)
   1042 conn.set_tunnel(
   1043     scheme=tunnel_scheme,
   1044     host=self._tunnel_host,
   1045     port=self.port,
   1046     headers=self.proxy_headers,
   1047 )
-> 1048 conn.connect()

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/urllib3/connection.py:633, in HTTPSConnection.connect(self)
    631 self._has_connected_to_proxy = True
--> 633 self._tunnel()  # type: ignore[attr-defined]
    634 # Override the host with the one we're requesting data from.

File ~/miniconda3/envs/llm-study/lib/python3.10/http/client.py:925, in HTTPConnection._tunnel(self)
    924     self.close()
--> 925     raise OSError(f\"Tunnel connection failed: {code} {message.strip()}\")
    926 while True:

OSError: Tunnel connection failed: 400 Bad Request

The above exception was the direct cause of the following exception:

ProxyError                                Traceback (most recent call last)
ProxyError: ('Unable to connect to proxy', OSError('Tunnel connection failed: 400 Bad Request'))

The above exception was the direct cause of the following exception:

MaxRetryError                             Traceback (most recent call last)
File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/requests/adapters.py:486, in HTTPAdapter.send(self, request, stream, timeout, verify, cert, proxies)
    485 try:
--> 486     resp = conn.urlopen(
    487         method=request.method,
    488         url=url,
    489         body=request.body,
    490         headers=request.headers,
    491         redirect=False,
    492         assert_same_host=False,
    493         preload_content=False,
    494         decode_content=False,
    495         retries=self.max_retries,
    496         timeout=timeout,
    497         chunked=chunked,
    498     )
    500 except (ProtocolError, OSError) as err:

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/urllib3/connectionpool.py:877, in HTTPConnectionPool.urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, preload_content, decode_content, **response_kw)
    874     log.warning(
    875         \"Retrying (%r) after connection broken by '%r': %s\", retries, err, url
    876     )
--> 877     return self.urlopen(
    878         method,
    879         url,
    880         body,
    881         headers,
    882         retries,
    883         redirect,
    884         assert_same_host,
    885         timeout=timeout,
    886         pool_timeout=pool_timeout,
    887         release_conn=release_conn,
    888         chunked=chunked,
    889         body_pos=body_pos,
    890         preload_content=preload_content,
    891         decode_content=decode_content,
    892         **response_kw,
    893     )
    895 # Handle redirect?

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/urllib3/connectionpool.py:877, in HTTPConnectionPool.urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, preload_content, decode_content, **response_kw)
    874     log.warning(
    875         \"Retrying (%r) after connection broken by '%r': %s\", retries, err, url
    876     )
--> 877     return self.urlopen(
    878         method,
    879         url,
    880         body,
    881         headers,
    882         retries,
    883         redirect,
    884         assert_same_host,
    885         timeout=timeout,
    886         pool_timeout=pool_timeout,
    887         release_conn=release_conn,
    888         chunked=chunked,
    889         body_pos=body_pos,
    890         preload_content=preload_content,
    891         decode_content=decode_content,
    892         **response_kw,
    893     )
    895 # Handle redirect?

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/urllib3/connectionpool.py:847, in HTTPConnectionPool.urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, preload_content, decode_content, **response_kw)
    845     new_e = ProtocolError(\"Connection aborted.\", new_e)
--> 847 retries = retries.increment(
    848     method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2]
    849 )
    850 retries.sleep()

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/urllib3/util/retry.py:515, in Retry.increment(self, method, url, response, error, _pool, _stacktrace)
    514     reason = error or ResponseError(cause)
--> 515     raise MaxRetryError(_pool, url, reason) from reason  # type: ignore[arg-type]
    517 log.debug(\"Incremented Retry for (url='%s'): %r\", url, new_retry)

MaxRetryError: HTTPSConnectionPool(host='ai-yyds.com', port=443): Max retries exceeded with url: /v1/completions (Caused by ProxyError('Unable to connect to proxy', OSError('Tunnel connection failed: 400 Bad Request')))

During handling of the above exception, another exception occurred:

ProxyError                                Traceback (most recent call last)
File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/openai/api_requestor.py:606, in APIRequestor.request_raw(self, method, url, params, supplied_headers, files, stream, request_id, request_timeout)
    605 try:
--> 606     result = _thread_context.session.request(
    607         method,
    608         abs_url,
    609         headers=headers,
    610         data=data,
    611         files=files,
    612         stream=stream,
    613         timeout=request_timeout if request_timeout else TIMEOUT_SECS,
    614         proxies=_thread_context.session.proxies,
    615     )
    616 except requests.exceptions.Timeout as e:

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/requests/sessions.py:589, in Session.request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
    588 send_kwargs.update(settings)
--> 589 resp = self.send(prep, **send_kwargs)
    591 return resp

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/requests/sessions.py:703, in Session.send(self, request, **kwargs)
    702 # Send the request
--> 703 r = adapter.send(request, **kwargs)
    705 # Total elapsed time of the request (approximately)

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/requests/adapters.py:513, in HTTPAdapter.send(self, request, stream, timeout, verify, cert, proxies)
    512 if isinstance(e.reason, _ProxyError):
--> 513     raise ProxyError(e, request=request)
    515 if isinstance(e.reason, _SSLError):
    516     # This branch is for urllib3 v1.22 and later.

ProxyError: HTTPSConnectionPool(host='ai-yyds.com', port=443): Max retries exceeded with url: /v1/completions (Caused by ProxyError('Unable to connect to proxy', OSError('Tunnel connection failed: 400 Bad Request')))

The above exception was the direct cause of the following exception:

APIConnectionError                        Traceback (most recent call last)
Cell In[24], line 13
      6 api_key = os.getenv(\"OPENAI_KEY\")
      7 llm = OpenAI(
      8     model=\"gpt-3.5-turbo-instruct\",
      9     temperature=0,
     10     openai_api_key=api_key,
     11     openai_api_base=api_base
     12     )
---> 13 llm.predict(\"介绍下你自己\")

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:148, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs)
    146     warned = True
    147     emit_warning()
--> 148 return wrapped(*args, **kwargs)

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/langchain_core/language_models/llms.py:1127, in BaseLLM.predict(self, text, stop, **kwargs)
   1125 else:
   1126     _stop = list(stop)
-> 1127 return self(text, stop=_stop, **kwargs)

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:148, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs)
    146     warned = True
    147     emit_warning()
--> 148 return wrapped(*args, **kwargs)

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/langchain_core/language_models/llms.py:1086, in BaseLLM.__call__(self, prompt, stop, callbacks, tags, metadata, **kwargs)
   1079 if not isinstance(prompt, str):
   1080     raise ValueError(
   1081         \"Argument `prompt` is expected to be a string. Instead found \"
   1082         f\"{type(prompt)}. If you want to run the LLM on multiple prompts, use \"
   1083         \"`generate` instead.\"
   1084     )
   1085 return (
-> 1086     self.generate(
   1087         [prompt],
   1088         stop=stop,
   1089         callbacks=callbacks,
   1090         tags=tags,
   1091         metadata=metadata,
   1092         **kwargs,
   1093     )
   1094     .generations[0][0]
   1095     .text
   1096 )

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/langchain_core/language_models/llms.py:803, in BaseLLM.generate(self, prompts, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    788 if (self.cache is None and get_llm_cache() is None) or self.cache is False:
    789     run_managers = [
    790         callback_manager.on_llm_start(
    791             dumpd(self),
   (...)
    801         )
    802     ]
--> 803     output = self._generate_helper(
    804         prompts, stop, run_managers, bool(new_arg_supported), **kwargs
    805     )
    806     return output
    807 if len(missing_prompts) > 0:

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/langchain_core/language_models/llms.py:670, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs)
    668     for run_manager in run_managers:
    669         run_manager.on_llm_error(e, response=LLMResult(generations=[]))
--> 670     raise e
    671 flattened_outputs = output.flatten()
    672 for manager, flattened_output in zip(run_managers, flattened_outputs):

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/langchain_core/language_models/llms.py:657, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs)
    647 def _generate_helper(
    648     self,
    649     prompts: List[str],
   (...)
    653     **kwargs: Any,
    654 ) -> LLMResult:
    655     try:
    656         output = (
--> 657             self._generate(
    658                 prompts,
    659                 stop=stop,
    660                 # TODO: support multiple run managers
    661                 run_manager=run_managers[0] if run_managers else None,
    662                 **kwargs,
    663             )
    664             if new_arg_supported
    665             else self._generate(prompts, stop=stop)
    666         )
    667     except BaseException as e:
    668         for run_manager in run_managers:

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/langchain_community/llms/openai.py:460, in BaseOpenAI._generate(self, prompts, stop, run_manager, **kwargs)
    448     choices.append(
    449         {
    450             \"text\": generation.text,
   (...)
    457         }
    458     )
    459 else:
--> 460     response = completion_with_retry(
    461         self, prompt=_prompts, run_manager=run_manager, **params
    462     )
    463     if not isinstance(response, dict):
    464         # V1 client returns the response in an PyDantic object instead of
    465         # dict. For the transition period, we deep convert it to dict.
    466         response = response.dict()

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/langchain_community/llms/openai.py:123, in completion_with_retry(llm, run_manager, **kwargs)
    119 @retry_decorator
    120 def _completion_with_retry(**kwargs: Any) -> Any:
    121     return llm.client.create(**kwargs)
--> 123 return _completion_with_retry(**kwargs)

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/tenacity/__init__.py:289, in BaseRetrying.wraps.<locals>.wrapped_f(*args, **kw)
    287 @functools.wraps(f)
    288 def wrapped_f(*args: t.Any, **kw: t.Any) -> t.Any:
--> 289     return self(f, *args, **kw)

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/tenacity/__init__.py:379, in Retrying.__call__(self, fn, *args, **kwargs)
    377 retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
    378 while True:
--> 379     do = self.iter(retry_state=retry_state)
    380     if isinstance(do, DoAttempt):
    381         try:

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/tenacity/__init__.py:325, in BaseRetrying.iter(self, retry_state)
    323     retry_exc = self.retry_error_cls(fut)
    324     if self.reraise:
--> 325         raise retry_exc.reraise()
    326     raise retry_exc from fut.exception()
    328 if self.wait:

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/tenacity/__init__.py:158, in RetryError.reraise(self)
    156 def reraise(self) -> t.NoReturn:
    157     if self.last_attempt.failed:
--> 158         raise self.last_attempt.result()
    159     raise self

File ~/miniconda3/envs/llm-study/lib/python3.10/concurrent/futures/_base.py:451, in Future.result(self, timeout)
    449     raise CancelledError()
    450 elif self._state == FINISHED:
--> 451     return self.__get_result()
    453 self._condition.wait(timeout)
    455 if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:

File ~/miniconda3/envs/llm-study/lib/python3.10/concurrent/futures/_base.py:403, in Future.__get_result(self)
    401 if self._exception:
    402     try:
--> 403         raise self._exception
    404     finally:
    405         # Break a reference cycle with the exception in self._exception
    406         self = None

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/tenacity/__init__.py:382, in Retrying.__call__(self, fn, *args, **kwargs)
    380 if isinstance(do, DoAttempt):
    381     try:
--> 382         result = fn(*args, **kwargs)
    383     except BaseException:  # noqa: B902
    384         retry_state.set_exception(sys.exc_info())  # type: ignore[arg-type]

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/langchain_community/llms/openai.py:121, in completion_with_retry.<locals>._completion_with_retry(**kwargs)
    119 @retry_decorator
    120 def _completion_with_retry(**kwargs: Any) -> Any:
--> 121     return llm.client.create(**kwargs)

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/openai/api_resources/completion.py:25, in Completion.create(cls, *args, **kwargs)
     23 while True:
     24     try:
---> 25         return super().create(*args, **kwargs)
     26     except TryAgain as e:
     27         if timeout is not None and time.time() > start + timeout:

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/openai/api_resources/abstract/engine_api_resource.py:155, in EngineAPIResource.create(cls, api_key, api_base, api_type, request_id, api_version, organization, **params)
    129 @classmethod
    130 def create(
    131     cls,
   (...)
    138     **params,
    139 ):
    140     (
    141         deployment_id,
    142         engine,
   (...)
    152         api_key, api_base, api_type, api_version, organization, **params
    153     )
--> 155     response, _, api_key = requestor.request(
    156         \"post\",
    157         url,
    158         params=params,
    159         headers=headers,
    160         stream=stream,
    161         request_id=request_id,
    162         request_timeout=request_timeout,
    163     )
    165     if stream:
    166         # must be an iterator
    167         assert not isinstance(response, OpenAIResponse)

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/openai/api_requestor.py:289, in APIRequestor.request(self, method, url, params, headers, files, stream, request_id, request_timeout)
    278 def request(
    279     self,
    280     method,
   (...)
    287     request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
    288 ) -> Tuple[Union[OpenAIResponse, Iterator[OpenAIResponse]], bool, str]:
--> 289     result = self.request_raw(
    290         method.lower(),
    291         url,
    292         params=params,
    293         supplied_headers=headers,
    294         files=files,
    295         stream=stream,
    296         request_id=request_id,
    297         request_timeout=request_timeout,
    298     )
    299     resp, got_stream = self._interpret_response(result, stream)
    300     return resp, got_stream, self.api_key

File ~/miniconda3/envs/llm-study/lib/python3.10/site-packages/openai/api_requestor.py:619, in APIRequestor.request_raw(self, method, url, params, supplied_headers, files, stream, request_id, request_timeout)
    617     raise error.Timeout(\"Request timed out: {}\".format(e)) from e
    618 except requests.exceptions.RequestException as e:
--> 619     raise error.APIConnectionError(
    620         \"Error communicating with OpenAI: {}\".format(e)
    621     ) from e
    622 util.log_debug(
    623     \"OpenAI API response\",
    624     path=abs_url,
   (...)
    627     request_id=result.headers.get(\"X-Request-Id\"),
    628 )
    629 # Don't read the whole stream for debug logging unless necessary.

APIConnectionError: Error communicating with OpenAI: HTTPSConnectionPool(host='ai-yyds.com', port=443): Max retries exceeded with url: /v1/completions (Caused by ProxyError('Unable to connect to proxy', OSError('Tunnel connection failed: 400 Bad Request')))"
}


1 Answer

tomiezhang 2024-04-30 18:24:16

1. Make sure your environment has langchain==0.0.279 and openai==0.27.8 installed, e.g. with the pip command below.
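
For example (assuming you install into the same conda environment shown in the traceback):

pip install langchain==0.0.279 openai==0.27.8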
2. In newer openai versions the 3.5-series models are exposed as chat models, so you can also try:

from langchain.chat_models import ChatOpenAI
import os

# Same relay endpoint and key as in the question
api_base = os.getenv("OPENAI_API_BASE")
api_key = os.getenv("OPENAI_KEY")

llm = ChatOpenAI(
    model_name='gpt-3.5-turbo-16k',
    temperature=0,
    openai_api_key=api_key,
    openai_api_base=api_base,
)
llm.predict("介绍下你自己")
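
Note that ChatOpenAI calls the /v1/chat/completions endpoint rather than /v1/completions, so if your relay only forwards the chat route, the completion-style OpenAI class above will fail while this one works.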

3. You can also pull my prebuilt Docker images locally and try them:

Docker images:
tomiezhang/imooc:v1
tomiezhang/imooc_new:v1
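
For example (just the pull step; how you run the container depends on what the image bundles, which isn't specified here):

docker pull tomiezhang/imooc:v1
docker pull tomiezhang/imooc_new:v1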


  • Asker Rex1024 #1
    Could the intermittent failures be down to this proxy site being unstable?
    2024-04-30 21:21:11
  • tomiezhang replying to asker Rex1024 #2
    Either way, this is definitely not a code problem.
    2024-04-30 21:38:01
  • Asker Rex1024 replying to tomiezhang #3
    Replying to tomiezhang:
    1. I've already checked the versions.
    2. I've also switched models.
    😭 No idea what's going on.
    It keeps reporting Error communicating with OpenAI: HTTPSConnectionPool(host='ai-yyds.com', port=443)
    It's intermittent: it occasionally runs fine, but most of the time it fails with the connection error.
    I've also switched the VPN endpoint, and tried downgrading urllib3 to 1.25.11 as suggested online 😭. I'm at my wits' end.
    2024-04-30 21:43:00
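
The "Tunnel connection failed: 400 Bad Request" in the traceback means the HTTPS CONNECT handshake with a local proxy is being rejected before the request ever reaches ai-yyds.com, which fits the intermittent behaviour. A minimal diagnostic sketch (assuming the relay mirrors the standard /v1/models route; adjust names to your setup):

import os
import requests

# Print any proxy settings the Python process inherited; a flaky or
# misconfigured local proxy is what raises "Tunnel connection failed"
for var in ("HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy", "ALL_PROXY"):
    print(var, "=", os.getenv(var))

# Hit the relay directly with proxies disabled, to separate
# proxy trouble from trouble with the endpoint itself
session = requests.Session()
session.trust_env = False  # ignore HTTP(S)_PROXY environment variables
resp = session.get(
    "https://ai-yyds.com/v1/models",
    headers={"Authorization": f"Bearer {os.getenv('OPENAI_KEY')}"},
    timeout=10,
)
print(resp.status_code)

If this direct call succeeds while the LangChain call keeps failing, the proxy (not the relay or your code) is the culprit, and unsetting the proxy variables or fixing the proxy configuration should stabilise it.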