Server: Apache/2.4.52 (Ubuntu)
System: Linux spn-python 5.15.0-89-generic #99-Ubuntu SMP Mon Oct 30 20:42:41 UTC 2023 x86_64
User: arjun (1000)
PHP: 8.1.2-1ubuntu2.20
Disabled: NONE
File: //usr/local/lib/python3.10/dist-packages/langchain_openai/llms/__pycache__/base.cpython-310.pyc
# Recovered structure of langchain_openai/llms/base.py
# (outline reconstructed from base.cpython-310.pyc; most method bodies omitted)

from __future__ import annotations

import logging
import sys
from typing import (AbstractSet, Any, AsyncIterator, Collection, Dict, Iterator,
                    List, Literal, Mapping, Optional, Set, Tuple, Union)

import openai
import tiktoken
from langchain_core.callbacks import (AsyncCallbackManagerForLLMRun,
                                      CallbackManagerForLLMRun)
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.utils import get_pydantic_field_names
from langchain_core.utils.utils import _build_model_kwargs, from_env, secret_from_env
from pydantic import ConfigDict, Field, SecretStr, model_validator
from typing_extensions import Self

logger = logging.getLogger(__name__)


def _update_token_usage(keys: Set[str], response: Dict[str, Any],
                        token_usage: Dict[str, Any]) -> None:
    """Update token usage."""
    _keys_to_use = keys.intersection(response["usage"])
    for _key in _keys_to_use:
        if _key not in token_usage:
            token_usage[_key] = response["usage"][_key]
        else:
            token_usage[_key] += response["usage"][_key]


def _stream_response_to_generation_chunk(stream_response: Dict[str, Any]) -> GenerationChunk:
    """Convert a stream response to a generation chunk."""
    if not stream_response["choices"]:
        return GenerationChunk(text="")
    return GenerationChunk(
        text=stream_response["choices"][0]["text"],
        generation_info=dict(
            finish_reason=stream_response["choices"][0].get("finish_reason", None),
            logprobs=stream_response["choices"][0].get("logprobs", None),
        ),
    )


class BaseOpenAI(BaseLLM):
    """Base OpenAI large language model class."""

    # Pydantic fields recorded in the bytecode (aliases and env vars as shown):
    #   client, async_client
    #   model_name          (alias "model", default "gpt-3.5-turbo-instruct")
    #   temperature, max_tokens, top_p, frequency_penalty, presence_penalty, n, best_of
    #   model_kwargs
    #   openai_api_key      (alias "api_key",      env OPENAI_API_KEY)
    #   openai_api_base     (alias "base_url",     env OPENAI_API_BASE)
    #   openai_organization (alias "organization", env OPENAI_ORG_ID / OPENAI_ORGANIZATION)
    #   openai_proxy        (env OPENAI_PROXY)
    #   batch_size, request_timeout (alias "timeout"), logit_bias, max_retries, seed,
    #   logprobs, streaming, allowed_special, disallowed_special, tiktoken_model_name,
    #   default_headers, default_query, http_client, http_async_client, extra_body
    model_config = ConfigDict(populate_by_name=True)

    @model_validator(mode="before")
    @classmethod
    def build_extra(cls, values: Dict[str, Any]) -> Any:
        """Build extra kwargs from additional params that were passed in."""

    @model_validator(mode="after")
    def validate_environment(self) -> Self:
        """Validate that api key and python package exists in environment."""
        # Raises ValueError for "n must be at least 1.", "Cannot stream results when
        # n > 1." and "Cannot stream results when best_of > 1.", then builds
        # openai.OpenAI(...).completions and openai.AsyncOpenAI(...).completions
        # clients for self.client and self.async_client.

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling OpenAI API."""

    def _stream(self, prompt: str, stop: Optional[List[str]] = None,
                run_manager: Optional[CallbackManagerForLLMRun] = None,
                **kwargs: Any) -> Iterator[GenerationChunk]: ...

    async def _astream(self, prompt: str, stop: Optional[List[str]] = None,
                       run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
                       **kwargs: Any) -> AsyncIterator[GenerationChunk]: ...

    def _generate(self, prompts: List[str], stop: Optional[List[str]] = None,
                  run_manager: Optional[CallbackManagerForLLMRun] = None,
                  **kwargs: Any) -> LLMResult:
        """Call out to OpenAI's endpoint with k unique prompts.

        Args:
            prompts: The prompts to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The full LLM output.

        Example:
            .. code-block:: python

                response = openai.generate(["Tell me a joke."])
        """
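    # Worked illustration (numbers invented for clarity, not recovered from the file):
    # _generate slices `prompts` into batches via get_sub_prompts, issues one
    # completions request per batch, and folds each response's usage counters into a
    # single running dict with _update_token_usage, e.g.
    #     {"prompt_tokens": 12, "completion_tokens": 40, "total_tokens": 52}
    #   + {"prompt_tokens":  9, "completion_tokens": 31, "total_tokens": 40}
    #   = {"prompt_tokens": 21, "completion_tokens": 71, "total_tokens": 92}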

    async def _agenerate(self, prompts: List[str], stop: Optional[List[str]] = None,
                         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
                         **kwargs: Any) -> LLMResult:
        """Call out to OpenAI's endpoint async with k unique prompts."""
        # Both _generate and _agenerate raise ValueError("Cannot stream results with
        # multiple prompts.") when streaming is combined with more than one prompt.

    def get_sub_prompts(self, params: Dict[str, Any], prompts: List[str],
                        stop: Optional[List[str]] = None) -> List[List[str]]:
        """Get the sub prompts for llm call."""
        # Slices `prompts` into chunks of self.batch_size; for max_tokens == -1 it
        # raises ValueError("max_tokens set to -1 not supported for multiple inputs.")
        # unless there is a single prompt, in which case max_tokens_for_prompt is used.

    def create_llm_result(self, choices: Any, prompts: List[str],
                          params: Dict[str, Any], token_usage: Dict[str, int],
                          system_fingerprint: Optional[str] = None) -> LLMResult:
        """Create the LLMResult from the choices and prompts."""

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        """Get the parameters used to invoke the model."""

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""  # "openai"

    def get_token_ids(self, text: str) -> List[int]:
        """Get the token IDs using the tiktoken package."""
        # Encodes with tiktoken for tiktoken_model_name or model_name, falling back to
        # the "cl100k_base" encoding on KeyError, and passes allowed_special and
        # disallowed_special through to the encoder.

    @staticmethod
    def modelname_to_contextsize(modelname: str) -> int:
        """Calculate the maximum number of tokens possible to generate for a model.

        Args:
            modelname: The modelname we want to know the context size for.

        Returns:
            The maximum context size

        Example:
            .. code-block:: python

                max_tokens = openai.modelname_to_contextsize("gpt-3.5-turbo-instruct")
        """
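        # Worked illustration (hypothetical model id, added for clarity): a fine-tuned
        # name such as "davinci:ft-personal-2024" contains "ft-", so it is split on ":"
        # and looked up as "davinci"; names absent from the mapping raise the
        # "Unknown model" ValueError noted below.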
        # The recorded model_token_mapping covers (context sizes omitted here):
        #   gpt-4o-mini, gpt-4o, gpt-4o-2024-05-13, gpt-4, gpt-4-0314, gpt-4-0613,
        #   gpt-4-32k, gpt-4-32k-0314, gpt-4-32k-0613, gpt-3.5-turbo,
        #   gpt-3.5-turbo-0301, gpt-3.5-turbo-0613, gpt-3.5-turbo-16k,
        #   gpt-3.5-turbo-16k-0613, gpt-3.5-turbo-instruct, text-ada-001, ada,
        #   text-babbage-001, babbage, text-curie-001, curie, davinci,
        #   text-davinci-003, text-davinci-002, code-davinci-002, code-davinci-001,
        #   code-cushman-002, code-cushman-001.
        # Unknown names raise ValueError("Unknown model: <name>. Please provide a
        # valid OpenAI model name. Known models are: ...").

    @property
    def max_context_size(self) -> int:
        """Get max context size for this model."""
        # return self.modelname_to_contextsize(self.model_name)

    def max_tokens_for_prompt(self, prompt: str) -> int:
        """Calculate the maximum number of tokens possible to generate for a prompt.

        Args:
            prompt: The prompt to pass into the model.

        Returns:
            The maximum number of tokens to generate for a prompt.

        Example:
            .. code-block:: python

                max_tokens = openai.max_tokens_for_prompt("Tell me a joke.")
        """
        # num_tokens = self.get_num_tokens(prompt); returns max_context_size - num_tokens


class OpenAI(BaseOpenAI):
    """OpenAI completion model integration.

    Setup:
        Install ``langchain-openai`` and set environment variable ``OPENAI_API_KEY``.

        .. code-block:: bash

            pip install -U langchain-openai
            export OPENAI_API_KEY="your-api-key"

    Key init args — completion params:
        model: str
            Name of OpenAI model to use.
        temperature: float
            Sampling temperature.
        max_tokens: Optional[int]
            Max number of tokens to generate.
        logprobs: Optional[bool]
            Whether to return logprobs.
        stream_options: Dict
            Configure streaming outputs, like whether to return token usage when
            streaming (``{"include_usage": True}``).

    Key init args — client params:
        timeout: Union[float, Tuple[float, float], Any, None]
            Timeout for requests.
        max_retries: int
            Max number of retries.
        api_key: Optional[str]
            OpenAI API key. If not passed in will be read from env var OPENAI_API_KEY.
        base_url: Optional[str]
            Base URL for API requests. Only specify if using a proxy or service
            emulator.
        organization: Optional[str]
            OpenAI organization ID. If not passed in will be read from env
            var OPENAI_ORG_ID.

    See full list of supported init args and their descriptions in the params section.

    Instantiate:
        .. code-block:: python

            from langchain_openai import OpenAI

            llm = OpenAI(
                model="gpt-3.5-turbo-instruct",
                temperature=0,
                max_retries=2,
                # api_key="...",
                # base_url="...",
                # organization="...",
                # other params...
            )

    Invoke:
        .. code-block:: python

            input_text = "The meaning of life is "
            llm.invoke(input_text)

        .. code-block:: none

            "a philosophical question that has been debated by thinkers and scholars for centuries."

    Stream:
        .. code-block:: python

            for chunk in llm.stream(input_text):
                print(chunk, end="|")

        .. code-block:: none

            a| philosophical| question| that| has| been| debated| by| thinkers| and| scholars| for| centuries|.

        .. code-block:: python

            "".join(llm.stream(input_text))

        .. code-block:: none

            "a philosophical question that has been debated by thinkers and scholars for centuries."

    Async:
        .. code-block:: python

            await llm.ainvoke(input_text)

            # stream:
            # async for chunk in (await llm.astream(input_text)):
            #    print(chunk)

            # batch:
            # await llm.abatch([input_text])

        .. code-block:: none

            "a philosophical question that has been debated by thinkers and scholars for centuries."

    """

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        # ["langchain", "llms", "openai"]

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by Langchain."""
        # True

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        # {"model": self.model_name, **super()._invocation_params}
        ...

    @property
    def lc_secrets(self) -> Dict[str, str]:
        # {"openai_api_key": "OPENAI_API_KEY"}
        ...

    @property
    def lc_attributes(self) -> Dict[str, Any]:
        # openai_api_base, openai_organization and openai_proxy, when they are set.
        ...
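
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the recovered file): assumes
# `langchain-openai` is installed and OPENAI_API_KEY is set; shows how the
# context-size helpers above combine with a plain and a streamed completion.
# The model name and prompt are arbitrary examples.
# ---------------------------------------------------------------------------
from langchain_openai import OpenAI

llm = OpenAI(model="gpt-3.5-turbo-instruct", temperature=0)

prompt = "Tell me a joke."
print("context window:", llm.max_context_size)            # via modelname_to_contextsize
print("tokens left for output:", llm.max_tokens_for_prompt(prompt))

print(llm.invoke(prompt))                                  # single completion
for chunk in llm.stream(prompt):                           # incremental completion
    print(chunk, end="", flush=True)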