From 29af9e0ab9ed91d51f42f95d2e132078fc7eff5f Mon Sep 17 00:00:00 2001 From: Gregory Leeman Date: Fri, 27 Jun 2025 14:28:21 +0100 Subject: [PATCH] lazygit --- plugin/gpt.vim | 299 +++++++++++++++++++++++++++---------------------- python/gpt.py | 37 ++---- vimrc | 9 -- 3 files changed, 179 insertions(+), 166 deletions(-) diff --git a/plugin/gpt.vim b/plugin/gpt.vim index 747181c..7032c91 100644 --- a/plugin/gpt.vim +++ b/plugin/gpt.vim @@ -1,185 +1,222 @@ -" ChatGPT Vim Plugin +" gpt.vim if !has('python3') - echo "Python 3 support is required for ChatGPT plugin" - finish + echo "Python 3 support is required for ChatGPT plugin" + finish endif -if !exists("g:gpt_max_tokens") - let g:gpt_max_tokens = 2000 +if !exists("g:gpt_openai_api_key") + echo "g:gpt_openai_api_key is not set. Please set it to your OpenAI API key." + finish endif -if !exists("g:gpt_temperature") - let g:gpt_temperature = 0.7 +if !exists("g:gpt_max_tokens") + let g:gpt_max_tokens = 2000 endif -if !exists("g:gpt_model") - let g:gpt_model = 'gpt-4o' +if !exists("g:gpt_temperature") + let g:gpt_temperature = 0.7 endif if !exists("g:gpt_lang") - let g:gpt_lang = 'English' + let g:gpt_lang = 'English' endif if !exists("g:gpt_split_direction") - let g:gpt_split_direction = 'horizontal' + let g:gpt_split_direction = 'horizontal' endif if !exists("g:gpt_split_ratio") - let g:gpt_split_ratio = 3 + let g:gpt_split_ratio = 3 endif if !exists("g:gpt_persona") - let g:gpt_persona = 'default' + let g:gpt_persona = 'default' endif -let g:gpt_templates = { -\ 'Rewrite': 'Rewrite this more idiomatically', -\ 'Review': 'Review this code', -\ 'Document': 'Return documentation following language pattern conventions', -\ 'Explain': 'Explain how this works', -\ 'Test': 'Write a test', -\ 'Fix': 'Fix this error', +let g:gpt_personas = { + \ "default": '', + \ "bob": 'You are a helpful expert programmer we are working together to solve complex coding challenges, and I need your help. 
Please make sure to wrap all code blocks in ``` annotate the programming language you are using.', \} -let g:gpt_keys = keys(g:gpt_templates) +if !exists("g:gpt_model") + let g:gpt_model = 'gpt-4o' +endif -let g:gpt_personas = { -\ "default": '', -\ "bob": 'You are a helpful expert programmer we are working together to solve complex coding challenges, and I need your help. Please make sure to wrap all code blocks in ``` annotate the programming language you are using.', +let g:gpt_models = { + \ "gpt-3.5-turbo": 4097, + \ "gpt-3.5-turbo-16k": 16385, + \ "gpt-3.5-turbo-1106": 16385, + \ "gpt-4": 8192, + \ "gpt-4-turbo": 128000, + \ "gpt-4-turbo-preview": 128000, + \ "gpt-4-32k": 32768, + \ "gpt-4o": 128000, + \ "gpt-4o-mini": 128000, \} -function! DisplayChatGPTResponse(response, finish_reason, chat_gpt_session_id) " {{{ - let response = a:response - let finish_reason = a:finish_reason - - let chat_gpt_session_id = a:chat_gpt_session_id - - if !bufexists(chat_gpt_session_id) - if g:gpt_split_direction ==# 'vertical' - silent execute winwidth(0)/g:gpt_split_ratio.'vnew '. chat_gpt_session_id - else - silent execute winheight(0)/g:gpt_split_ratio.'new '. chat_gpt_session_id - endif - call setbufvar(chat_gpt_session_id, '&buftype', 'nofile') - call setbufvar(chat_gpt_session_id, '&bufhidden', 'hide') - call setbufvar(chat_gpt_session_id, '&swapfile', 0) - setlocal modifiable - setlocal wrap - setlocal linebreak - call setbufvar(chat_gpt_session_id, '&ft', 'markdown') - call setbufvar(chat_gpt_session_id, '&syntax', 'markdown') - endif - - if bufwinnr(chat_gpt_session_id) == -1 - if g:gpt_split_direction ==# 'vertical' - execute winwidth(0)/g:gpt_split_ratio.'vsplit ' . chat_gpt_session_id - else - execute winheight(0)/g:gpt_split_ratio.'split ' . chat_gpt_session_id - endif - endif - - let last_lines = getbufline(chat_gpt_session_id, '$') - let last_line = empty(last_lines) ? '' : last_lines[-1] - - let new_lines = substitute(last_line . 
response, '\n', '\r\n\r', 'g') - let lines = split(new_lines, '\n') - - let clean_lines = [] - for line in lines - call add(clean_lines, substitute(line, '\r', '', 'g')) - endfor - - call setbufline(chat_gpt_session_id, '$', clean_lines) - - execute bufwinnr(chat_gpt_session_id) . 'wincmd w' +function! GptInfo() " {{{ + echo 'model: ' . g:gpt_model + echo 'max_tokens: ' . string(g:gpt_max_tokens) + echo 'temperature: ' . string(g:gpt_temperature) + echo 'persona: ' . g:gpt_persona + echo 'language: ' . g:gpt_lang + echo 'split_direction: ' . g:gpt_split_direction + echo 'split_ratio: ' . string(g:gpt_split_ratio) + echo 'personas: ' . join(keys(g:gpt_personas), ', ') + echo 'models: ' . join(keys(g:gpt_models), ', ') +endfunction + +" }}} + +function! GptDisplay(response, finish_reason, chat_gpt_session_id) " {{{ + let response = a:response + let finish_reason = a:finish_reason + + let chat_gpt_session_id = a:chat_gpt_session_id + + if !bufexists(chat_gpt_session_id) + if g:gpt_split_direction ==# 'vertical' + silent execute winwidth(0)/g:gpt_split_ratio.'vnew '. chat_gpt_session_id + else + silent execute winheight(0)/g:gpt_split_ratio.'new '. chat_gpt_session_id + endif + call setbufvar(chat_gpt_session_id, '&buftype', 'nofile') + call setbufvar(chat_gpt_session_id, '&bufhidden', 'hide') + call setbufvar(chat_gpt_session_id, '&swapfile', 0) + setlocal modifiable + setlocal wrap + setlocal linebreak + call setbufvar(chat_gpt_session_id, '&ft', 'markdown') + call setbufvar(chat_gpt_session_id, '&syntax', 'markdown') + endif + + if bufwinnr(chat_gpt_session_id) == -1 + if g:gpt_split_direction ==# 'vertical' + execute winwidth(0)/g:gpt_split_ratio.'vsplit ' . chat_gpt_session_id + else + execute winheight(0)/g:gpt_split_ratio.'split ' . chat_gpt_session_id + endif + endif + + let last_lines = getbufline(chat_gpt_session_id, '$') + let last_line = empty(last_lines) ? '' : last_lines[-1] + + let new_lines = substitute(last_line . 
response, '\n', '\r\n\r', 'g') + let lines = split(new_lines, '\n') + + let clean_lines = [] + for line in lines + call add(clean_lines, substitute(line, '\r', '', 'g')) + endfor + + call setbufline(chat_gpt_session_id, '$', clean_lines) + + execute bufwinnr(chat_gpt_session_id) . 'wincmd w' " Move the viewport to the bottom of the buffer - normal! G - call cursor('$', 1) + normal! G + call cursor('$', 1) - if finish_reason != '' - wincmd p - endif + if finish_reason != '' + wincmd p + endif endfunction " }}} -function! ChatGPT(prompt, persist) abort " {{{ +function! Gpt(prompt, persist) abort " {{{ " echo 'prompt: ' . a:prompt " echo 'persist: ' . a:persist - - silent py3file ~/.vim/python/gpt.py + silent py3file ~/.vim/python/gpt.py endfunction " }}} -function! SendToChatGPT(prompt, bang) abort " {{{ +function! GptSend(prompt, bang) abort " {{{ let persist = (a:bang ==# '!') ? 1 : 0 - let save_cursor = getcurpos() - let [current_line, current_col] = getcurpos()[1:2] - let save_reg = @@ - let save_regtype = getregtype('@') - - let [line_start, col_start] = getpos("'<")[1:2] - let [line_end, col_end] = getpos("'>")[1:2] - - if (col_end - col_start > 0 || line_end - line_start > 0) && - \ (current_line == line_start && current_col == col_start || - \ current_line == line_end && current_col == col_end) - - let current_line_start = line_start - let current_line_end = line_end - - if current_line_start == line_start && current_line_end == line_end - execute 'normal! ' . line_start . 'G' . col_start . '|v' . line_end . 'G' . col_end . '|y' - let snippet = "\n" . '```' . &syntax . "\n" . @@ . "\n" . 
'```' - else - let snippet = '' - endif - else - let snippet = '' - endif - - if has_key(g:gpt_templates, a:prompt) - let prompt = g:gpt_templates[a:prompt] - else - let prompt = a:prompt - endif + let save_cursor = getcurpos() + let [current_line, current_col] = getcurpos()[1:2] + let save_reg = @@ + let save_regtype = getregtype('@') + + let [line_start, col_start] = getpos("'<")[1:2] + let [line_end, col_end] = getpos("'>")[1:2] + + if (col_end - col_start > 0 || line_end - line_start > 0) && + \ (current_line == line_start && current_col == col_start || + \ current_line == line_end && current_col == col_end) + + let current_line_start = line_start + let current_line_end = line_end - let prompt = prompt . snippet + if current_line_start == line_start && current_line_end == line_end + execute 'normal! ' . line_start . 'G' . col_start . '|v' . line_end . 'G' . col_end . '|y' + let snippet = "\n" . '```' . &syntax . "\n" . @@ . "\n" . '```' + else + let snippet = '' + endif + else + let snippet = '' + endif - call ChatGPT(prompt, persist) + let prompt = a:prompt . snippet - let @@ = save_reg - call setreg('@', save_reg, save_regtype) + call Gpt(prompt, persist) + + let @@ = save_reg + call setreg('@', save_reg, save_regtype) + + let curpos = getcurpos() + call setpos("'<", curpos) + call setpos("'>", curpos) + call setpos('.', save_cursor) - let curpos = getcurpos() - call setpos("'<", curpos) - call setpos("'>", curpos) - call setpos('.', save_cursor) endfunction " }}} -command! -range -bang -nargs=1 Gpt call SendToChatGPT(<q-args>, '<bang>') - -for i in range(len(g:gpt_keys)) - execute "command! -range -bang -nargs=0 " . g:gpt_keys[i] . " call SendToChatGPT('" . g:gpt_keys[i] . "', '<bang>')" -endfor - -function! SetPersona(persona) " {{{ - let personas = keys(g:gpt_personas) - if index(personas, a:persona) != -1 - echo 'Persona set to: ' . a:persona - let g:gpt_persona = a:persona - else - let g:gpt_persona = 'default' - echo a:persona . ' is not a valid persona. Defaulting to: ' . 
g:gpt_persona - end +function! GptPersona(persona=0) " {{{ + if a:persona == 0 + echo 'Personas: ' . join(keys(g:gpt_personas), ', ') + return + endif + + let personas = keys(g:gpt_personas) + + if index(personas, a:persona) != -1 + echo 'Persona set to: ' . a:persona + let g:gpt_persona = a:persona + else + let g:gpt_persona = 'default' + echo a:persona . ' is not a valid persona. Defaulting to: ' . g:gpt_persona + end + +endfunction + +" }}} + +function! GptModel(model=0) " {{{ + if a:model == 0 + echo 'Models: ' . join(keys(g:gpt_models), ', ') + return + endif + + let models = keys(g:gpt_models) + + if index(models, a:model) != -1 + echo 'model set to: ' . a:model + let g:gpt_model = a:model + else + let g:gpt_model = 'gpt-4o' + echo a:model . ' is not a valid model. Defaulting to: ' . g:gpt_model + end endfunction " }}} -command! -nargs=1 Persona call SetPersona(<q-args>) +command! -nargs=0 GptInfo call GptInfo() +command! -nargs=1 -range -bang Gpt call GptSend(<q-args>, '<bang>') +command! -nargs=? GptPersona call GptPersona(<q-args>) +command! -nargs=? GptModel call GptModel(<q-args>) diff --git a/python/gpt.py b/python/gpt.py index 167f398..51eb338 100644 --- a/python/gpt.py +++ b/python/gpt.py @@ -3,7 +3,7 @@ import vim import os try: - from openai import AzureOpenAI, OpenAI + from openai import OpenAI except ImportError: print("Error: openai module not found. 
Please install with Pip and ensure equality of the versions given by :!python3 -V, and :python3 import sys; print(sys.version)") raise @@ -15,31 +15,16 @@ def safe_vim_eval(expression): return None def create_client(): - api_type = safe_vim_eval('g:gpt_api_type') api_key = os.getenv('OPENAI_API_KEY') or safe_vim_eval('g:gpt_key') or safe_vim_eval('g:gpt_openai_api_key') openai_base_url = os.getenv('OPENAI_PROXY') or os.getenv('OPENAI_API_BASE') or safe_vim_eval('g:gpt_openai_base_url') - - if api_type == 'azure': - azure_endpoint = safe_vim_eval('g:gpt_azure_endpoint') - azure_api_version = safe_vim_eval('g:gpt_azure_api_version') - azure_deployment = safe_vim_eval('g:gpt_azure_deployment') - assert azure_endpoint and azure_api_version and azure_deployment, "azure_endpoint, azure_api_version and azure_deployment not set property, please check your settings in `vimrc` or `enviroment`." - assert api_key, "api_key not set, please configure your `openai_api_key` in your `vimrc` or `enviroment`" - client = AzureOpenAI( - azure_endpoint=azure_endpoint, - azure_deployment=azure_deployment, - api_key=api_key, - api_version=azure_api_version, - ) - else: - client = OpenAI( - base_url=openai_base_url, - api_key=api_key, - ) + client = OpenAI( + base_url=openai_base_url, + api_key=api_key, + ) return client -def chat_gpt(prompt, persist=0): +def chat_gpt(prompt, persist=0): token_limits = { "gpt-3.5-turbo": 4097, "gpt-3.5-turbo-16k": 16385, @@ -54,6 +39,7 @@ def chat_gpt(prompt, persist=0): max_tokens = int(vim.eval('g:gpt_max_tokens')) model = str(vim.eval('g:gpt_model')) + token_limit = int(vim.eval('g:gpt_models[g:gpt_model]')) temperature = float(vim.eval('g:gpt_temperature')) lang = str(vim.eval('g:gpt_lang')) resp = f" And respond in {lang}." 
if lang != 'None' else "" @@ -80,7 +66,7 @@ def chat_gpt(prompt, persist=0): history.reverse() # Adding messages to history until token limit is reached - token_count = token_limits.get(model, 4097) - max_tokens - len(prompt) - len(str(systemCtx)) + token_count = token_limit - max_tokens - len(prompt) - len(str(systemCtx)) for line in history: if line.startswith("USER\n"): @@ -104,7 +90,7 @@ def chat_gpt(prompt, persist=0): content += '# GPT' content += '\n\n>>> USER\n' + prompt + '\n\n>>> ASSISTANT\n'.replace("'", "''") - vim.command("call DisplayChatGPTResponse('{0}', '', '{1}')".format(content.replace("'", "''"), session_id)) + vim.command("call GptDisplay('{0}', '', '{1}')".format(content.replace("'", "''"), session_id)) vim.command("redraw") messages.append({"role": "user", "content": prompt}) @@ -131,12 +117,11 @@ def chat_gpt(prompt, persist=0): choice = chunk.choices[0] finish_reason = choice.finish_reason - # Call DisplayChatGPTResponse with the finish_reason or content if finish_reason: - vim.command("call DisplayChatGPTResponse('', '{0}', '{1}')".format(finish_reason.replace("'", "''"), chunk_session_id)) + vim.command("call GptDisplay('', '{0}', '{1}')".format(finish_reason.replace("'", "''"), chunk_session_id)) elif choice.delta: content = choice.delta.content - vim.command("call DisplayChatGPTResponse('{0}', '', '{1}')".format(content.replace("'", "''"), chunk_session_id)) + vim.command("call GptDisplay('{0}', '', '{1}')".format(content.replace("'", "''"), chunk_session_id)) vim.command("redraw") except Exception as e: diff --git a/vimrc b/vimrc index 73af368..544db21 100644 --- a/vimrc +++ b/vimrc @@ -153,13 +153,4 @@ set completeopt=noinsert,menuone,noselect let g:slime_target = "tmux" let g:slime_default_config = {"socket_name": get(split($TMUX, ","), 0), "target_pane": ":.2"} - let 
g:gpt_openai_api_key='sk-proj-REDACTED-LEAKED-KEY-MUST-BE-REVOKED' -let g:gpt_max_tokens=2000 -let g:gpt_model='gpt-4o' -let g:gpt_temperature = 0.7 -let g:gpt_lang = 'English' -" let g:chat_gpt_split_direction = 'vertical' -" let g:split_ratio=4 - -vmap 0 <Plug>(chatgpt-menu)