Server: Apache/2.4.52 (Ubuntu)
System: Linux spn-python 5.15.0-89-generic #99-Ubuntu SMP Mon Oct 30 20:42:41 UTC 2023 x86_64
User: arjun (1000)
PHP: 8.1.2-1ubuntu2.20
Disabled functions: NONE
File: /usr/local/lib/python3.10/dist-packages/langchain/chains/__pycache__/llm.cpython-310.pyc
# Reconstructed source view of llm.cpython-310.pyc: the module's names,
# docstrings, and string constants below are recovered from the compiled
# bytecode (original source path: /usr/local/lib/python3.10/dist-packages/langchain/chains/llm.py).
"""Chain that just formats a prompt and calls an LLM."""

from __future__ import annotations

import warnings
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast

from langchain_core._api import deprecated
from langchain_core.callbacks import (
    AsyncCallbackManager,
    AsyncCallbackManagerForChainRun,
    CallbackManager,
    CallbackManagerForChainRun,
    Callbacks,
)
from langchain_core.language_models import BaseLanguageModel, LanguageModelInput
from langchain_core.messages import BaseMessage
from langchain_core.output_parsers import BaseLLMOutputParser, StrOutputParser
from langchain_core.outputs import ChatGeneration, Generation, LLMResult
from langchain_core.prompt_values import PromptValue
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain_core.runnables import (
    Runnable,
    RunnableBinding,
    RunnableBranch,
    RunnableWithFallbacks,
)
from langchain_core.runnables.configurable import DynamicRunnable
from langchain_core.utils.input import get_colored_text
from pydantic import ConfigDict, Field

from langchain.chains.base import Chain


@deprecated(
    since="0.1.17",
    alternative="RunnableSequence, e.g., `prompt | llm`",
    removal="1.0",
)
class LLMChain(Chain):
    """Chain to run queries against LLMs.

    This class is deprecated. See below for an example implementation using
    LangChain runnables:

        .. code-block:: python

            from langchain_core.output_parsers import StrOutputParser
            from langchain_core.prompts import PromptTemplate
            from langchain_openai import OpenAI

            prompt_template = "Tell me a {adjective} joke"
            prompt = PromptTemplate(
                input_variables=["adjective"], template=prompt_template
            )
            llm = OpenAI()
            chain = prompt | llm | StrOutputParser()

            chain.invoke("your adjective here")

    Example:
        .. code-block:: python

            from langchain.chains import LLMChain
            from langchain_community.llms import OpenAI
            from langchain_core.prompts import PromptTemplate

            prompt_template = "Tell me a {adjective} joke"
            prompt = PromptTemplate(
                input_variables=["adjective"], template=prompt_template
            )
            llm = LLMChain(llm=OpenAI(), prompt=prompt)
    """

    @classmethod
    def is_lc_serializable(self) -> bool:
        return True

    prompt: BasePromptTemplate
    """Prompt object to use."""
    llm: Union[
        Runnable[LanguageModelInput, str], Runnable[LanguageModelInput, BaseMessage]
    ]
    """Language model to call."""
    output_key: str = "text"  #: :meta private:
    output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser)
    """Output parser to use; defaults to one that takes the most likely string."""
    return_final_only: bool = True
    """Whether to return only the final parsed result. If False, a
    "full_generation" key with extra generation info is also returned."""
    llm_kwargs: dict = Field(default_factory=dict)

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the prompt expects.

        :meta private:
        """
        return self.prompt.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Will always return text key.

        :meta private:
        """
        if self.return_final_only:
            return [self.output_key]
        else:
            return [self.output_key, "full_generation"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        response = self.generate([inputs], run_manager=run_manager)
        return self.create_outputs(response)[0]

    def generate(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> LLMResult:
        """Generate LLM result from inputs."""
        prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)
        callbacks = run_manager.get_child() if run_manager else None
        if isinstance(self.llm, BaseLanguageModel):
            return self.llm.generate_prompt(
                prompts, stop, callbacks=callbacks, **self.llm_kwargs
            )
        else:
            results = self.llm.bind(stop=stop, **self.llm_kwargs).batch(
                cast(List, prompts), {"callbacks": callbacks}
            )
            generations: List[List[Generation]] = []
            for res in results:
                if isinstance(res, BaseMessage):
                    generations.append([ChatGeneration(message=res)])
                else:
                    generations.append([Generation(text=res)])
            return LLMResult(generations=generations)

    async def agenerate(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> LLMResult:
        """Generate LLM result from inputs."""
        prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager)
        callbacks = run_manager.get_child() if run_manager else None
        if isinstance(self.llm, BaseLanguageModel):
            return await self.llm.agenerate_prompt(
                prompts, stop, callbacks=callbacks, **self.llm_kwargs
            )
        else:
            results = await self.llm.bind(stop=stop, **self.llm_kwargs).abatch(
                cast(List, prompts), {"callbacks": callbacks}
            )
            generations: List[List[Generation]] = []
            for res in results:
                if isinstance(res, BaseMessage):
                    generations.append([ChatGeneration(message=res)])
                else:
                    generations.append([Generation(text=res)])
            return LLMResult(generations=generations)

    def prep_prompts(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
        if len(input_list) == 0:
            return [], stop
        if "stop" in input_list[0]:
            stop = input_list[0]["stop"]
        prompts = []
        for inputs in input_list:
            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
            prompt = self.prompt.format_prompt(**selected_inputs)
            _colored_text = get_colored_text(prompt.to_string(), "green")
            _text = "Prompt after formatting:\n" + _colored_text
            if run_manager:
                run_manager.on_text(_text, end="\n", verbose=self.verbose)
            if "stop" in inputs and inputs["stop"] != stop:
                raise ValueError(
                    "If `stop` is present in any inputs, should be present in all."
                )
            prompts.append(prompt)
        return prompts, stop

    async def aprep_prompts(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
        if len(input_list) == 0:
            return [], stop
        if "stop" in input_list[0]:
            stop = input_list[0]["stop"]
        prompts = []
        for inputs in input_list:
            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
            prompt = self.prompt.format_prompt(**selected_inputs)
            _colored_text = get_colored_text(prompt.to_string(), "green")
            _text = "Prompt after formatting:\n" + _colored_text
            if run_manager:
                await run_manager.on_text(_text, end="\n", verbose=self.verbose)
            if "stop" in inputs and inputs["stop"] != stop:
                raise ValueError(
                    "If `stop` is present in any inputs, should be present in all."
                )
            prompts.append(prompt)
        return prompts, stop

    def apply(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""
        callback_manager = CallbackManager.configure(
            callbacks, self.callbacks, self.verbose
        )
        run_manager = callback_manager.on_chain_start(
            None, {"input_list": input_list}, name=self.get_name()
        )
        try:
            response = self.generate(input_list, run_manager=run_manager)
        except BaseException as e:
            run_manager.on_chain_error(e)
            raise e
        outputs = self.create_outputs(response)
        run_manager.on_chain_end({"outputs": outputs})
        return outputs

    async def aapply(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""
        callback_manager = AsyncCallbackManager.configure(
            callbacks, self.callbacks, self.verbose
        )
        run_manager = await callback_manager.on_chain_start(
            None, {"input_list": input_list}, name=self.get_name()
        )
        try:
            response = await self.agenerate(input_list, run_manager=run_manager)
        except BaseException as e:
            await run_manager.on_chain_error(e)
            raise e
        outputs = self.create_outputs(response)
        await run_manager.on_chain_end({"outputs": outputs})
        return outputs

    @property
    def _run_output_key(self) -> str:
        return self.output_key

    def create_outputs(self, llm_result: LLMResult) -> List[Dict[str, Any]]:
        """Create outputs from response."""
        result = [
            # Get the text of the top generated string.
            {
                self.output_key: self.output_parser.parse_result(generation),
                "full_generation": generation,
            }
            for generation in llm_result.generations
        ]
        if self.return_final_only:
            result = [{self.output_key: r[self.output_key]} for r in result]
        return result

    async def _acall(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        response = await self.agenerate([inputs], run_manager=run_manager)
        return self.create_outputs(response)[0]

    def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM.

        Args:
            callbacks: Callbacks to pass to LLMChain
            **kwargs: Keys to pass to prompt template.

        Returns:
            Completion from LLM.

        Example:
            .. code-block:: python

                completion = llm.predict(adjective="funny")
        """
        return self(kwargs, callbacks=callbacks)[self.output_key]

    async def apredict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM."""
        return (await self.acall(kwargs, callbacks=callbacks))[self.output_key]

    def predict_and_parse(
        self, callbacks: Callbacks = None, **kwargs: Any
    ) -> Union[str, List[str], Dict[str, Any]]:
        """Call predict and then parse the results."""
        warnings.warn(
            "The predict_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = self.predict(callbacks=callbacks, **kwargs)
        if self.prompt.output_parser is not None:
            return self.prompt.output_parser.parse(result)
        else:
            return result

    async def apredict_and_parse(
        self, callbacks: Callbacks = None, **kwargs: Any
    ) -> Union[str, List[str], Dict[str, str]]:
        """Call apredict and then parse the results."""
        warnings.warn(
            "The apredict_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = await self.apredict(callbacks=callbacks, **kwargs)
        if self.prompt.output_parser is not None:
            return self.prompt.output_parser.parse(result)
        else:
            return result

    def apply_and_parse(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        """Call apply and then parse the results."""
        warnings.warn(
            "The apply_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = self.apply(input_list, callbacks=callbacks)
        return self._parse_generation(result)

    def _parse_generation(
        self, generation: List[Dict[str, str]]
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        if self.prompt.output_parser is not None:
            return [
                self.prompt.output_parser.parse(res[self.output_key])
                for res in generation
            ]
        else:
            return generation

    async def aapply_and_parse(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        """Call apply and then parse the results."""
        warnings.warn(
            "The aapply_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = await self.aapply(input_list, callbacks=callbacks)
        return self._parse_generation(result)

    @property
    def _chain_type(self) -> str:
        return "llm_chain"

    @classmethod
    def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain:
        """Create LLMChain from LLM and template."""
        prompt_template = PromptTemplate.from_template(template)
        return cls(llm=llm, prompt=prompt_template)

    def _get_num_tokens(self, text: str) -> int:
        return _get_language_model(self.llm).get_num_tokens(text)


def _get_language_model(llm_like: Runnable) -> BaseLanguageModel:
    if isinstance(llm_like, BaseLanguageModel):
        return llm_like
    elif isinstance(llm_like, RunnableBinding):
        return _get_language_model(llm_like.bound)
    elif isinstance(llm_like, RunnableWithFallbacks):
        return _get_language_model(llm_like.runnable)
    elif isinstance(llm_like, (RunnableBranch, DynamicRunnable)):
        return _get_language_model(llm_like.default)
    else:
        raise ValueError(
            f"Unable to extract BaseLanguageModel from llm_like object of type "
            f"{type(llm_like)}"
        )
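

# --- Usage sketch (illustrative addition; not part of the original module) ---
# A minimal, hedged example of the deprecated LLMChain next to the runnable
# replacement recommended by the deprecation notice above. FakeListLLM is a
# stub model shipped with langchain_core and is assumed here only so the
# sketch runs offline; swap in a real model (e.g. langchain_openai.OpenAI)
# for actual completions.
if __name__ == "__main__":
    from langchain_core.language_models import FakeListLLM
    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.prompts import PromptTemplate

    fake_llm = FakeListLLM(responses=["Why did the chicken cross the road?"])
    prompt = PromptTemplate.from_template("Tell me a {adjective} joke")

    # Deprecated style: constructing LLMChain emits a deprecation warning
    # (since 0.1.17, slated for removal in 1.0).
    chain = LLMChain(llm=fake_llm, prompt=prompt)
    print(chain.predict(adjective="funny"))

    # Replacement style: compose prompt | llm | parser and invoke it.
    runnable = prompt | fake_llm | StrOutputParser()
    print(runnable.invoke({"adjective": "funny"}))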