summary refs log tree commit diff
path: root/.config/emacs
diff options
context:
space:
mode:
Diffstat (limited to '.config/emacs')
-rw-r--r-- .config/emacs/init.el | 2
-rw-r--r-- .config/emacs/modules/bd--chat.el (renamed from .config/emacs/modules/bd--irc.el) | 54
2 files changed, 53 insertions(+), 3 deletions(-)
diff --git a/.config/emacs/init.el b/.config/emacs/init.el
index b8d911b..c46441a 100644
--- a/.config/emacs/init.el
+++ b/.config/emacs/init.el
@@ -103,7 +103,7 @@
(require 'bd--project)
(require 'bd--files)
(require 'bd--dictionary)
-(require 'bd--irc)
+(require 'bd--chat)
(require 'bd--shells)
(require 'bd--minibuffer)
(require 'bd--buffer)
diff --git a/.config/emacs/modules/bd--irc.el b/.config/emacs/modules/bd--chat.el
index fe2d58f..a55f10a 100644
--- a/.config/emacs/modules/bd--irc.el
+++ b/.config/emacs/modules/bd--chat.el
@@ -58,6 +58,56 @@ channel, which is issuing the PART command."
:port 6697
:encryption tls))))
+(use-package gptel
+ :bind (("C-c g" . gptel-menu))
+ :config
+ (defvar bd/llama-cpp-buffer-name "*llama-cpp-proc*")
+ (defvar bd/llama-cpp-reasoning-buffer-name "*llama-cpp-reasoning*")
+ (defvar bd/llama-cpp-port "4568")
+ (defvar bd/llama-cpp-threads "8")
+ (defvar bd/llama-cpp-model-file "~/.config/guix/assets/Qwen3-4B.Q3_K_M.gguf")
+ (defvar bd/llama-cpp-reasoning-budget nil)
+ (defun bd/gptel-start-backend ()
+ (interactive)
+ (let ((process (get-buffer-process bd/llama-cpp-buffer-name)))
+ (if process
+ (message "llama-cpp process is already running!")
+ (progn
+ (start-process-shell-command
+ "llama-cpp" bd/llama-cpp-buffer-name
+ (concat "llama-server --reasoning-budget "
+ (if bd/llama-cpp-reasoning-budget "-1" "0")
+ " --port " bd/llama-cpp-port
+ " -t " bd/llama-cpp-threads
+ " -m " bd/llama-cpp-model-file)))
+ (unless (get-buffer bd/llama-cpp-reasoning-buffer-name)
+ (generate-new-buffer bd/llama-cpp-reasoning-buffer-name)))))
+ (defun bd/gptel-stop-backend ()
+ (interactive)
+ (let ((process (get-buffer-process bd/llama-cpp-buffer-name)))
+ (if process
+ (progn
+ (delete-process process)
+ (kill-buffer bd/llama-cpp-buffer-name)
+ (message "Killed %s." process))
+ (message "No llama-cpp process is running."))))
+ (defun bd/gptel-restart-backend ()
+ (interactive)
+ (bd/gptel-stop-backend)
+ (bd/gptel-start-backend))
+
+ (setopt gptel-model 'qwen-4b
+ gptel-backend (gptel-make-openai "llama-cpp"
+ :stream t
+ :protocol "http"
+ :host (concat "localhost:" bd/llama-cpp-port)
+ :models '(qwen-4b))
+ gptel-max-tokens 500
+ gptel-include-reasoning bd/llama-cpp-reasoning-buffer-name)
+
+ (gptel-make-preset 'default
+ :system "You are a wolf (furry) named Evka hired as a secretary to complete language-based tasks. First describe an action your character does, e.x.: *I tap my claws on the desk*. Finish by responding to the task as tersely as possible, in character."))
+
-(provide 'bd--irc)
-;;; bd--irc.el ends here
+(provide 'bd--chat)
+;;; bd--chat.el ends here