emacs-elpa-diffs
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[nongnu] elpa/gptel 38095eaed5 146/273: gptel: Fix prompt collection bug


From: ELPA Syncer
Subject: [nongnu] elpa/gptel 38095eaed5 146/273: gptel: Fix prompt collection bug + linting
Date: Wed, 1 May 2024 10:02:17 -0400 (EDT)

branch: elpa/gptel
commit 38095eaed52061f45495d6b4f7347b797fe36d28
Author: Karthik Chikmagalur <karthikchikmagalur@gmail.com>
Commit: Karthik Chikmagalur <karthikchikmagalur@gmail.com>

    gptel: Fix prompt collection bug + linting
    
    * gptel.el: Update package description.
    
    * gptel-gemini.el(gptel--request-data, gptel--parse-buffer): Add
    model temperature to request correctly.
    
    * gptel-ollama.el(gptel--parse-buffer): Ensure that newlines are
    trimmed correctly even when `gptel-prompt-prefix-string` and
    `gptel-response-prefix-string` are absent.  Fix formatting and
    linter warnings.
    
    * gptel-openai.el(gptel--parse-buffer): Ditto.
---
 gptel-gemini.el |  9 ++++++---
 gptel-ollama.el | 17 ++++++++++-------
 gptel-openai.el |  8 ++++++--
 gptel.el        | 24 +++++++++++++++---------
 4 files changed, 37 insertions(+), 21 deletions(-)

diff --git a/gptel-gemini.el b/gptel-gemini.el
index ca1c06bdee..f05f33b5f5 100644
--- a/gptel-gemini.el
+++ b/gptel-gemini.el
@@ -27,6 +27,10 @@
 (require 'cl-generic)
 (require 'map)
 
+(declare-function prop-match-value "text-property-search")
+(declare-function text-property-search-backward "text-property-search")
+(declare-function json-read "json")
+
 ;;; Gemini
 (cl-defstruct
     (gptel-gemini (:constructor gptel--make-gemini)
@@ -61,7 +65,7 @@
     (when gptel-temperature
       (setq params
             (plist-put params
-                       :temperature (max temperature 1.0))))
+                       :temperature (max gptel-temperature 1.0))))
     (when gptel-max-tokens
       (setq params
             (plist-put params
@@ -88,8 +92,7 @@
                                (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
                                        (regexp-quote 
(gptel-prompt-prefix-string)))
                                (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
-                                       (regexp-quote 
(gptel-response-prefix-string)))))
-                  )
+                                       (regexp-quote 
(gptel-response-prefix-string))))))
             prompts)
       (and max-entries (cl-decf max-entries)))
     prompts))
diff --git a/gptel-ollama.el b/gptel-ollama.el
index 2bfd30643c..08b6db8cc0 100644
--- a/gptel-ollama.el
+++ b/gptel-ollama.el
@@ -20,7 +20,7 @@
 
 ;;; Commentary:
 
-;; This file adds support for the Ollama LLM API to gptel 
+;; This file adds support for the Ollama LLM API to gptel
 
 ;;; Code:
 (require 'gptel)
@@ -92,9 +92,12 @@ Ollama models.")
                          (string-trim
                           (buffer-substring-no-properties 
(prop-match-beginning prop)
                                                           (prop-match-end 
prop))
-                          (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote 
(gptel-prompt-prefix-string)))
-                          (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote 
(gptel-response-prefix-string))))
-                       ""))))))
+                          (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                                  (regexp-quote (gptel-prompt-prefix-string)))
+                          (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                                  (regexp-quote 
(gptel-response-prefix-string))))
+                       "")))
+      prompts)))
 
 ;;;###autoload
 (cl-defun gptel-make-ollama
@@ -118,13 +121,13 @@ ENDPOINT (optional) is the API endpoint for completions, 
defaults to
 \"/api/generate\".
 
 HEADER (optional) is for additional headers to send with each
-request. It should be an alist or a function that retuns an
+request.  It should be an alist or a function that returns an
 alist, like:
 ((\"Content-Type\" . \"application/json\"))
 
 KEY (optional) is a variable whose value is the API key, or
-function that returns the key. This is typically not required for
-local models like Ollama.
+function that returns the key.  This is typically not required
+for local models like Ollama.
 
 Example:
 -------
diff --git a/gptel-openai.el b/gptel-openai.el
index cfd2b98bc3..018cc4b2e3 100644
--- a/gptel-openai.el
+++ b/gptel-openai.el
@@ -40,6 +40,8 @@
 (declare-function prop-match-value "text-property-search")
 (declare-function text-property-search-backward "text-property-search")
 (declare-function json-read "json")
+(declare-function gptel-prompt-prefix-string "gptel")
+(declare-function gptel-response-prefix-string "gptel")
 
 ;;; Common backend struct for LLM support
 (cl-defstruct
@@ -100,8 +102,10 @@
                   (string-trim
                    (buffer-substring-no-properties (prop-match-beginning prop)
                                                    (prop-match-end prop))
-                   (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote 
(gptel-prompt-prefix-string)))
-                   (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote 
(gptel-response-prefix-string)))))
+                   (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                           (regexp-quote (gptel-prompt-prefix-string)))
+                   (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                           (regexp-quote (gptel-response-prefix-string)))))
             prompts)
       (and max-entries (cl-decf max-entries)))
     (cons (list :role "system"
diff --git a/gptel.el b/gptel.el
index 0f9bdf9448..1a61e85b56 100644
--- a/gptel.el
+++ b/gptel.el
@@ -41,13 +41,14 @@
 ;;  - You can go back and edit your previous prompts or LLM responses when
 ;;    continuing a conversation. These will be fed back to the model.
 ;;
-;; Requirements for ChatGPT/Azure:
+;; Requirements for ChatGPT, Azure or Gemini:
 ;;
-;; - You need an OpenAI API key. Set the variable `gptel-api-key' to the key or
-;;   to a function of no arguments that returns the key. (It tries to use
+;; - You need an appropriate API key. Set the variable `gptel-api-key' to the
+;;   key or to a function of no arguments that returns the key. (It tries to 
use
 ;;   `auth-source' by default)
 ;;
 ;; - For Azure: define a gptel-backend with `gptel-make-azure', which see.
+;; - For Gemini: define a gptel-backend with `gptel-make-gemini', which see.
 ;;
 ;; For local models using Ollama or GPT4All:
 ;;
@@ -72,15 +73,20 @@
 ;;   model, or choose to redirect the input or output elsewhere (such as to the
 ;;   kill ring).
 ;;
-;; - If using `org-mode': You can save this buffer to a file. When opening this
-;;   file, turning on `gptel-mode' will allow resuming the conversation.
+;; - You can save this buffer to a file. When opening this file, turning on
+;;   `gptel-mode' will allow resuming the conversation.
 ;;
 ;; To use this in any buffer:
 ;;
-;; - Select a region of text and call `gptel-send'. Call with a prefix argument
-;;   to access the menu. The contents of the buffer up to (point) are used
-;;   if no region is selected.
-;; - You can select previous prompts and responses to continue the 
conversation.
+;; - Call `gptel-send' to send the text up to the cursor.  Select a region to
+;;   send only the region.
+;;
+;; - You can select previous prompts and responses to
+;;   continue the conversation.
+;;
+;; - Call `gptel-send' with a prefix argument to access a menu where you can 
set
+;;   your backend, model and other parameters, or to redirect the
+;;   prompt/response.
 ;;
 ;; Finally, gptel offers a general purpose API for writing LLM interactions
 ;; that suit how you work, see `gptel-request'.



reply via email to

[Prev in Thread] Current Thread [Next in Thread]