about summary refs log tree commit diff
path: root/dotfiles
diff options
context:
space:
mode:
authorBaitinq <[email protected]>2025-03-08 17:56:58 +0100
committerBaitinq <[email protected]>2025-03-08 17:56:58 +0100
commit1bd3b75ff633ad62d957ceada6644dd078854a96 (patch)
treec166d5ee1609ea484a32c3621e3fba5cc38ef787 /dotfiles
parentUpdate (diff)
downloadnixos-config-1bd3b75ff633ad62d957ceada6644dd078854a96.tar.gz
nixos-config-1bd3b75ff633ad62d957ceada6644dd078854a96.tar.bz2
nixos-config-1bd3b75ff633ad62d957ceada6644dd078854a96.zip
Dotfiles: Nvim: Use codecompanion instead of avante
Diffstat (limited to 'dotfiles')
-rw-r--r--dotfiles/nvim/init.lua44
1 file changed, 24 insertions, 20 deletions
diff --git a/dotfiles/nvim/init.lua b/dotfiles/nvim/init.lua
index 38ecbb2..78292a9 100644
--- a/dotfiles/nvim/init.lua
+++ b/dotfiles/nvim/init.lua
@@ -241,29 +241,33 @@ require('lazy').setup({
   'github/copilot.vim',
 
   {
-    "yetone/avante.nvim",
+    "olimorris/codecompanion.nvim",
+    config = function()
+      require("codecompanion").setup({
+         strategies = {
+          chat = {
+            adapter = "openai",
+          },
+        },
+
+        adapters = {
+          openai = function()
+            return require("codecompanion.adapters").extend("openai", {
+              schema = {
+                model = {
+                  default = "o3-mini",
+                },
+              },
+            })
+          end,
+        },
+      })
+      vim.keymap.set('n', '<leader>a', ':CodeCompanionAction<CR>', { desc = '[A]I' })
+    end,
     dependencies = {
-      "nvim-tree/nvim-web-devicons",
-      "stevearc/dressing.nvim",
       "nvim-lua/plenary.nvim",
-      "MunifTanjim/nui.nvim",
-      {
-        "MeanderingProgrammer/render-markdown.nvim",
-        opts = { file_types = { "markdown", "Avante" } },
-        ft = { "markdown", "Avante" },
-      },
+      "nvim-treesitter/nvim-treesitter",
     },
-    build = "make",
-    opts = {
-        provider = "openai",
-        openai = {
-          endpoint = "https://api.openai.com/v1",
-          model = "o3-mini", -- your desired model (or use gpt-4o, etc.)
-          timeout = 30000, -- timeout in milliseconds
-          temperature = 0, -- adjust if needed
-          max_tokens = 4096,
-        },
-      },
   },
 
   {