from __future__ import annotations

import sys
from typing import TYPE_CHECKING, Optional, cast
from argparse import ArgumentParser
from functools import partial

from openai.types.completion import Completion

from .._utils import get_client
from ..._types import NOT_GIVEN, NotGivenOr
from ..._utils import is_given
from .._errors import CLIError
from ..._models import BaseModel
from ..._streaming import Stream

if TYPE_CHECKING:
    from argparse import _SubParsersAction


def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    sub = subparser.add_parser("completions.create")

    # Required
    sub.add_argument("-m", "--model", help="The model to use", required=True)

    # Optional
    sub.add_argument("-p", "--prompt", help="An optional prompt to complete from")
    sub.add_argument("--stream", help="Stream tokens as they're ready.", action="store_true")
    sub.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate", type=int)
    sub.add_argument(
        "-t",
        "--temperature",
        help="""What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.

Mutually exclusive with `top_p`.""",
        type=float,
    )
    sub.add_argument(
        "-P",
        "--top_p",
        help="""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.

            Mutually exclusive with `temperature`.""",
        type=float,
    )
    sub.add_argument("-n", "--n", help="How many sub-completions to generate for each prompt.", type=int)
    sub.add_argument(
        "--logprobs",
        help="Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is 0, only the chosen tokens will have logprobs returned.",
        type=int,
    )
    sub.add_argument(
        "--best_of",
        help="Generates `best_of` completions server-side and returns the 'best' (the one with the highest log probability per token). Results cannot be streamed.",
        type=int,
    )
    sub.add_argument("--echo", help="Echo back the prompt in addition to the completion", action="store_true")
    sub.add_argument(
        "--frequency_penalty",
        help="Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
        type=float,
    )
    sub.add_argument(
        "--presence_penalty",
        help="Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
        type=float,
    )
    sub.add_argument("--suffix", help="The suffix that comes after a completion of inserted text.")
    sub.add_argument("--stop", help="A stop sequence at which to stop generating tokens.")
    sub.add_argument(
        "--user",
        help="A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.",
    )
    sub.set_defaults(func=CLICompletions.create, args_model=CLICompletionCreateArgs)
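
# Hypothetical wiring sketch (not part of this module): `register` is designed
# to be called by the CLI's top-level parser builder with an argparse
# subparsers action, roughly:
#
#   parser = ArgumentParser(prog="openai")
#   subparsers = parser.add_subparsers(dest="command")
#   register(subparsers)
#   parsed = parser.parse_args(["completions.create", "-m", "<model>", "-p", "Hi"])
#
# After parsing, `parsed.func` is CLICompletions.create and `parsed.args_model`
# is the model class below, which validates the namespace before dispatch.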
r'   c                   @  s   e Zd ZU ded< dZded< dZded< eZd	ed
< eZded< eZ	ded< eZ
ded< eZded< eZd	ed< eZded< eZd	ed< eZd	ed< eZded< eZded< eZded< dS )r#   strmodelFboolstreamNzOptional[str]promptzNotGivenOr[int]nzNotGivenOr[str]stopuserzNotGivenOr[bool]echosuffixbest_ofzNotGivenOr[float]top_plogprobs
max_tokenstemperaturepresence_penaltyfrequency_penalty)__name__
__module____qualname____annotations__r+   r,   r   r-   r.   r/   r0   r1   r2   r3   r4   r5   r6   r7   r8   r%   r%   r%   r&   r#   ]   s   
r#   c                   @  sH   e Zd ZedddddZeddddd	Zed
ddddZdS )r!   r#   r   )argsr   c                 C  s   t | jr"| jdkr"| jr"tdtt jj| j| j| j	| j
| j| j| j| j| j| j| j| j| j| jd}| jrtttt |ddS t| S )N   z6Can't stream completions with n>1 with the current CLI)r-   r0   r.   r/   r)   r3   r,   r1   r2   r4   r5   r6   r7   r8   T)r+   )r   r-   r+   r   r   r
   Zcompletionsr"   r0   r.   r/   r)   r3   r,   r1   r2   r4   r5   r6   r7   r8   r!   _stream_creater   r   r   _create)r=   Zmake_requestr%   r%   r&   r"   q   s0    zCLICompletions.creater   )
completionr   c                 C  sj   t | jdk}| jD ]P}|r0tjd|j tj|j |sN|jdsZtjd tj	  qd S )Nr>   z===== Completion {} =====

)
lenchoicessysstdoutwriteformatindextextendswithflush)rA   should_print_headerchoicer%   r%   r&   r@      s    
zCLICompletions._createzStream[Completion])r+   r   c                 C  s   | D ]j}t |jdk}t|jdd dD ]D}|rDtjd|j tj|j |rbtjd tj	  q(qtjd d S )Nr>   c                 S  s   | j S )N)rI   )cr%   r%   r&   <lambda>       z/CLICompletions._stream_create.<locals>.<lambda>)keyz===== Chat Completion {} =====
rB   )
rC   rD   sortedrE   rF   rG   rH   rI   rJ   rL   )r+   rA   rM   rN   r%   r%   r&   r?      s    zCLICompletions._stream_createN)r9   r:   r;   staticmethodr"   r@   r?   r%   r%   r%   r&   r!   p   s   r!   )
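
# Example CLI usage (assumes this module is registered under the `openai api`
# command group, as in the openai-python package; the model name is illustrative):
#
#   openai api completions.create -m <model> -p "Say hello" -M 16
#   openai api completions.create -m <model> -p "Say hello" --stream
#
# Note that streaming is rejected when `-n` is greater than 1
# (see CLICompletions.create above).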