|
|
@@ -24,7 +24,7 @@ def create_client(): |
|
|
|
return client |
|
|
|
|
|
|
|
|
|
|
|
def chat_gpt(prompt, persist=0) |
|
|
|
def chat_gpt(prompt, persist=0): |
|
|
|
token_limits = { |
|
|
|
"gpt-3.5-turbo": 4097, |
|
|
|
"gpt-3.5-turbo-16k": 16385, |
|
|
@@ -39,7 +39,7 @@ def chat_gpt(prompt, persist=0) |
|
|
|
|
|
|
|
max_tokens = int(vim.eval('g:gpt_max_tokens')) |
|
|
|
model = str(vim.eval('g:gpt_model')) |
|
|
|
token_limit = vim.eval('g:gpt_models[g:gpt_model]') |
|
|
|
token_limit = int(vim.eval('g:gpt_models[g:gpt_model]')) |
|
|
|
temperature = float(vim.eval('g:gpt_temperature')) |
|
|
|
lang = str(vim.eval('g:gpt_lang')) |
|
|
|
resp = f" And respond in {lang}." if lang != 'None' else "" |
|
|
|