6 changed files with 366 additions and 0 deletions
@@ -0,0 +1 @@
syntax clear
@@ -0,0 +1,22 @@
syntax clear

syntax match UserHeader /^>>> USER/ containedin=ALL
syntax match AssistantHeader /^>>> ASSISTANT/ containedin=ALL

hi def link UserHeader orangebg
hi def link AssistantHeader bluebg

syntax match H1 '^#\s.*$'
syntax match H2 '^##\s.*$'
syntax match H3 '^###\s.*$'

hi def link H1 base3
hi def link H2 base2
hi def link H3 base2

syntax region CodeBlock start='^\s*```.*$' end='^\s*```$' keepend

syntax match InlineCode '`[^`]\+`' containedin=ALL

hi def link CodeBlock cyan
hi def link InlineCode cyan
@@ -0,0 +1 @@
" autocmd FileType markdown syntax clear
@@ -0,0 +1,185 @@
" ChatGPT Vim Plugin

if !has('python3')
  echo "Python 3 support is required for ChatGPT plugin"
  finish
endif

if !exists("g:gpt_max_tokens")
  let g:gpt_max_tokens = 2000
endif

if !exists("g:gpt_temperature")
  let g:gpt_temperature = 0.7
endif

if !exists("g:gpt_model")
  let g:gpt_model = 'gpt-4o'
endif

if !exists("g:gpt_lang")
  let g:gpt_lang = 'English'
endif

if !exists("g:gpt_split_direction")
  let g:gpt_split_direction = 'horizontal'
endif

if !exists("g:gpt_split_ratio")
  let g:gpt_split_ratio = 3
endif

if !exists("g:gpt_persona")
  let g:gpt_persona = 'default'
endif

let g:gpt_templates = {
      \ 'Rewrite': 'Rewrite this more idiomatically',
      \ 'Review': 'Review this code',
      \ 'Document': 'Return documentation following language pattern conventions',
      \ 'Explain': 'Explain how this works',
      \ 'Test': 'Write a test',
      \ 'Fix': 'Fix this error',
      \}

let g:gpt_keys = keys(g:gpt_templates)

let g:gpt_personas = {
      \ "default": '',
\ "bob": 'You are a helpful expert programmer we are working together to solve complex coding challenges, and I need your help. Please make sure to wrap all code blocks in ``` annotate the programming language you are using.', |
||||
|
      \}

function! DisplayChatGPTResponse(response, finish_reason, chat_gpt_session_id) " {{{
  let response = a:response
  let finish_reason = a:finish_reason

  let chat_gpt_session_id = a:chat_gpt_session_id

  if !bufexists(chat_gpt_session_id)
    if g:gpt_split_direction ==# 'vertical'
      silent execute winwidth(0)/g:gpt_split_ratio.'vnew '. chat_gpt_session_id
    else
      silent execute winheight(0)/g:gpt_split_ratio.'new '. chat_gpt_session_id
    endif
    call setbufvar(chat_gpt_session_id, '&buftype', 'nofile')
    call setbufvar(chat_gpt_session_id, '&bufhidden', 'hide')
    call setbufvar(chat_gpt_session_id, '&swapfile', 0)
    setlocal modifiable
    setlocal wrap
    setlocal linebreak
    call setbufvar(chat_gpt_session_id, '&ft', 'markdown')
    call setbufvar(chat_gpt_session_id, '&syntax', 'markdown')
  endif

  if bufwinnr(chat_gpt_session_id) == -1
    if g:gpt_split_direction ==# 'vertical'
      execute winwidth(0)/g:gpt_split_ratio.'vsplit ' . chat_gpt_session_id
    else
      execute winheight(0)/g:gpt_split_ratio.'split ' . chat_gpt_session_id
    endif
  endif

  let last_lines = getbufline(chat_gpt_session_id, '$')
  let last_line = empty(last_lines) ? '' : last_lines[-1]

  let new_lines = substitute(last_line . response, '\n', '\r\n\r', 'g')
  let lines = split(new_lines, '\n')

  let clean_lines = []
  for line in lines
    call add(clean_lines, substitute(line, '\r', '', 'g'))
  endfor

  call setbufline(chat_gpt_session_id, '$', clean_lines)

  execute bufwinnr(chat_gpt_session_id) . 'wincmd w'
  " Move the viewport to the bottom of the buffer
  normal! G
  call cursor('$', 1)

  if finish_reason != ''
    wincmd p
  endif
endfunction

" }}}

function! ChatGPT(prompt, persist) abort " {{{
  " echo 'prompt: ' . a:prompt
  " echo 'persist: ' . a:persist

  silent py3file ~/.vim/python/gpt.py
endfunction

" }}}

function! SendToChatGPT(prompt, bang) abort " {{{

  let persist = (a:bang ==# '!') ? 1 : 0

  let save_cursor = getcurpos()
  let [current_line, current_col] = getcurpos()[1:2]
  let save_reg = @@
  let save_regtype = getregtype('@')

  let [line_start, col_start] = getpos("'<")[1:2]
  let [line_end, col_end] = getpos("'>")[1:2]

  if (col_end - col_start > 0 || line_end - line_start > 0) &&
        \ (current_line == line_start && current_col == col_start ||
        \ current_line == line_end && current_col == col_end)

    let current_line_start = line_start
    let current_line_end = line_end

    if current_line_start == line_start && current_line_end == line_end
      execute 'normal! ' . line_start . 'G' . col_start . '|v' . line_end . 'G' . col_end . '|y'
      let snippet = "\n" . '```' . &syntax . "\n" . @@ . "\n" . '```'
    else
      let snippet = ''
    endif
  else
    let snippet = ''
  endif

  if has_key(g:gpt_templates, a:prompt)
    let prompt = g:gpt_templates[a:prompt]
  else
    let prompt = a:prompt
  endif

  let prompt = prompt . snippet

  call ChatGPT(prompt, persist)

  let @@ = save_reg
  call setreg('@', save_reg, save_regtype)

  let curpos = getcurpos()
  call setpos("'<", curpos)
  call setpos("'>", curpos)
  call setpos('.', save_cursor)
endfunction

" }}}

command! -range -bang -nargs=1 Gpt call SendToChatGPT(<q-args>, '<bang>')

for i in range(len(g:gpt_keys))
  execute "command! -range -bang -nargs=0 " . g:gpt_keys[i] . " call SendToChatGPT('" . g:gpt_keys[i] . "', '<bang>')"
endfor

function! SetPersona(persona) " {{{
  let personas = keys(g:gpt_personas)
  if index(personas, a:persona) != -1
    echo 'Persona set to: ' . a:persona
    let g:gpt_persona = a:persona
  else
    let g:gpt_persona = 'default'
    echo a:persona . ' is not a valid persona. Defaulting to: ' . g:gpt_persona
  endif
endfunction

" }}}

command! -nargs=1 Persona call SetPersona(<q-args>)
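
A minimal usage sketch for the plugin above (an illustration, assuming the file is loaded from the runtimepath as usual): the g:gpt_* options can be overridden in a vimrc (the values shown are the defaults from this hunk), and the commands are typically run on a visual selection.

    " optional overrides in vimrc; these are the plugin defaults
    let g:gpt_model = 'gpt-4o'
    let g:gpt_max_tokens = 2000
    let g:gpt_temperature = 0.7
    let g:gpt_split_direction = 'horizontal'

    " pick a persona, then send a selection with a template command or a free-form prompt
    :Persona bob
    :'<,'>Review
    :'<,'>Gpt Explain how this works
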
@@ -0,0 +1,145 @@
import sys
import vim
import os

try:
    from openai import AzureOpenAI, OpenAI
except ImportError:
print("Error: openai module not found. Please install with Pip and ensure equality of the versions given by :!python3 -V, and :python3 import sys; print(sys.version)") |
||||
|
    raise

def safe_vim_eval(expression):
    try:
        return vim.eval(expression)
    except vim.error:
        return None

def create_client():
    api_type = safe_vim_eval('g:gpt_api_type')
    api_key = os.getenv('OPENAI_API_KEY') or safe_vim_eval('g:gpt_key') or safe_vim_eval('g:gpt_openai_api_key')
    openai_base_url = os.getenv('OPENAI_PROXY') or os.getenv('OPENAI_API_BASE') or safe_vim_eval('g:gpt_openai_base_url')

    if api_type == 'azure':
        azure_endpoint = safe_vim_eval('g:gpt_azure_endpoint')
        azure_api_version = safe_vim_eval('g:gpt_azure_api_version')
        azure_deployment = safe_vim_eval('g:gpt_azure_deployment')
        assert azure_endpoint and azure_api_version and azure_deployment, "azure_endpoint, azure_api_version and azure_deployment are not set properly, please check your settings in `vimrc` or environment variables."
        assert api_key, "api_key not set, please configure your `openai_api_key` in your `vimrc` or environment."
        client = AzureOpenAI(
            azure_endpoint=azure_endpoint,
            azure_deployment=azure_deployment,
            api_key=api_key,
            api_version=azure_api_version,
        )
    else:
        client = OpenAI(
            base_url=openai_base_url,
            api_key=api_key,
        )
    return client


def chat_gpt(prompt, persist=0):
    token_limits = {
        "gpt-3.5-turbo": 4097,
        "gpt-3.5-turbo-16k": 16385,
        "gpt-3.5-turbo-1106": 16385,
        "gpt-4": 8192,
        "gpt-4-turbo": 128000,
        "gpt-4-turbo-preview": 128000,
        "gpt-4-32k": 32768,
        "gpt-4o": 128000,
        "gpt-4o-mini": 128000,
    }

    max_tokens = int(vim.eval('g:gpt_max_tokens'))
    model = str(vim.eval('g:gpt_model'))
    temperature = float(vim.eval('g:gpt_temperature'))
    lang = str(vim.eval('g:gpt_lang'))
    resp = f" And respond in {lang}." if lang != 'None' else ""

    personas = dict(vim.eval('g:gpt_personas'))
    persona = str(vim.eval('g:gpt_persona'))

    systemCtx = {"role": "system", "content": f"{personas[persona]} {resp}"}
    messages = []
    session_id = 'gpt-persistent-session' if persist == 0 else None

    # If session id exists and is in vim buffers
    if session_id:
        buffer = []

        for b in vim.buffers:
            # If the buffer name matches the session id
            if session_id in b.name:
                buffer = b[:]
                break

        # Read the lines from the buffer
        history = "\n".join(buffer).split('\n\n>>> ')
        history.reverse()

        # Adding messages to history until token limit is reached
        token_count = token_limits.get(model, 4097) - max_tokens - len(prompt) - len(str(systemCtx))

        for line in history:
            if line.startswith("USER\n"):
                role = "user"
                message = line.replace("USER\n", "").strip()
            elif line.startswith("ASSISTANT\n"):
                role = "assistant"
                message = line.replace("ASSISTANT\n", "").strip()
            else:
                continue
            token_count -= len(message)
            if token_count > 0:
                messages.insert(0, {
                    "role": role.lower(),
                    "content": message
                })

    if session_id:
        content = ''
        if len(buffer) == 0:
            content += '# GPT'
        content += '\n\n>>> USER\n' + prompt + '\n\n>>> ASSISTANT\n'

        vim.command("call DisplayChatGPTResponse('{0}', '', '{1}')".format(content.replace("'", "''"), session_id))
        vim.command("redraw")

    messages.append({"role": "user", "content": prompt})
    messages.insert(0, systemCtx)

    try:
        client = create_client()
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            stream=True
        )

        # Iterate through the response chunks
        for chunk in response:
            # newer Azure API responses contain empty chunks in the first streamed
            # response
            if not chunk.choices:
                continue

            chunk_session_id = session_id if session_id else chunk.id
            choice = chunk.choices[0]
            finish_reason = choice.finish_reason

            # Call DisplayChatGPTResponse with the finish_reason or content
            if finish_reason:
                vim.command("call DisplayChatGPTResponse('', '{0}', '{1}')".format(finish_reason.replace("'", "''"), chunk_session_id))
            elif choice.delta:
                content = choice.delta.content or ''
vim.command("call DisplayChatGPTResponse('{0}', '', '{1}')".format(content.replace("'", "''"), chunk_session_id)) |
||||
|
|
||||
|
vim.command("redraw") |
||||
|
except Exception as e: |
||||
|
print("Error:", str(e)) |
||||
|
|
||||
|
chat_gpt(vim.eval('a:prompt'), int(vim.eval('a:persist'))) |
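
For reference, a hedged sketch of the credential settings that create_client() above reads; the endpoint, API version, and deployment values below are placeholders, not values from this change:

    " OpenAI: the key is taken from $OPENAI_API_KEY, g:gpt_key, or g:gpt_openai_api_key
    let g:gpt_openai_api_key = $OPENAI_API_KEY

    " Azure OpenAI instead: gpt.py asserts that all three Azure settings are present
    " let g:gpt_api_type = 'azure'
    " let g:gpt_azure_endpoint = 'https://example.openai.azure.com'
    " let g:gpt_azure_api_version = '2024-02-01'
    " let g:gpt_azure_deployment = 'example-deployment'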