[elpa] externals/llm 7cb3dd4891 47/71: Merge branch 'main' into plz


From: ELPA Syncer
Subject: [elpa] externals/llm 7cb3dd4891 47/71: Merge branch 'main' into plz
Date: Fri, 17 May 2024 00:58:48 -0400 (EDT)

branch: externals/llm
commit 7cb3dd489141dd1604745921f201366c5527ae76
Merge: b438ae7a72 aaa30e3bd1
Author: Andrew Hyatt <ahyatt@gmail.com>
Commit: Andrew Hyatt <ahyatt@gmail.com>

    Merge branch 'main' into plz
---
 NEWS.org       |  7 +++++++
 llm-ollama.el  |  2 +-
 llm-openai.el  | 25 ++++++++++++++-----------
 llm-request.el | 16 +++++++++-------
 llm-tester.el  | 42 ++++++++++++++++++++++++++++++++++++++++--
 llm.el         |  2 +-
 6 files changed, 72 insertions(+), 22 deletions(-)

diff --git a/NEWS.org b/NEWS.org
index 3132d70bb9..f5e6ce9fd1 100644
--- a/NEWS.org
+++ b/NEWS.org
@@ -1,3 +1,10 @@
+* Version 0.12.2
+- Send connection issues to error callbacks, and fix an error handling issue in Ollama.
+* Version 0.12.1
+- Fix issue in =llm-ollama= with not using provider host for sync embeddings.
+- Fix issue in =llm-openai= where we were incompatible with some Open AI-compatible backends due to assumptions about inconsequential JSON details.
+* Version 0.12.0
+- Add provider =llm-claude=, for Anthropic's Claude.
 * Version 0.11.0
 - Introduce function calling, now available only in Open AI and Gemini.
 - Introduce =llm-capabilities=, which returns a list of extra capabilities for each backend.
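
For context: =llm-capabilities= (introduced in 0.11.0 above) is how callers probe a backend before using a feature, and it is how the llm-tester.el changes below gate their embedding tests. A minimal sketch, not part of this commit, with an invented helper name:

(defun my/embed-if-supported (provider text)
  "Hypothetical helper: embed TEXT only if PROVIDER supports embeddings."
  (when (member 'embeddings (llm-capabilities provider))
    (llm-embedding provider text)))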
diff --git a/llm-ollama.el b/llm-ollama.el
index 0a80e9955f..e6dd01b305 100644
--- a/llm-ollama.el
+++ b/llm-ollama.el
@@ -97,7 +97,7 @@ PROVIDER is the llm-ollama provider."
 
 (cl-defmethod llm-embedding ((provider llm-ollama) string)
   (llm-ollama--embedding-extract-response
-   (llm-request-plz-sync (format "http://localhost:%d/api/embeddings" (or (llm-ollama-port provider) 11434))
+   (llm-request-plz-sync (llm-ollama--url provider "embeddings")
                         :data (llm-ollama--embedding-request provider string))))
 
 (defun llm-ollama--chat-request (provider prompt streaming)
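
The change above makes the synchronous embedding path build its URL from the provider via llm-ollama--url instead of hard-coding localhost, matching the 0.12.1 NEWS entry. A hedged usage sketch; the slot keywords and model name are assumptions, not shown in this diff:

;; Hypothetical: a provider on a non-default host and port.  Before this
;; fix, the synchronous embedding call would still hit localhost:11434.
(let ((provider (make-llm-ollama :host "ollama.example.com"
                                 :port 8080
                                 :embedding-model "nomic-embed-text")))
  (llm-embedding provider "This is a test."))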
diff --git a/llm-openai.el b/llm-openai.el
index e444824262..610c6eba75 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -77,10 +77,12 @@ MODEL is the embedding model to use, or nil to use the default..
 
 (defun llm-openai--error-message (err-response)
   "Return a user-visible error message from ERR-RESPONSE."
-  (let ((errdata (cdr (assoc 'error err-response))))
-    (format "Problem calling Open AI: %s message: %s"
-            (cdr (assoc 'type errdata))
-            (cdr (assoc 'message errdata)))))
+  (if (stringp err-response)
+      err-response
+    (let ((errdata (cdr (assoc 'error err-response))))
+      (format "Open AI returned error: %s message: %s"
+              (cdr (assoc 'type errdata))
+              (cdr (assoc 'message errdata))))))
 
 (defun llm-openai--handle-response (response extractor)
   "If RESPONSE is an error, throw it, else call EXTRACTOR."
@@ -96,14 +98,17 @@ MODEL is the embedding model to use, or nil to use the default..
   ;; It isn't always the case that a key is needed for Open AI compatible APIs.
   )
 
-(defun llm-openai--headers (provider)
-  "From PROVIDER, return the headers to use for a request.
-This is just the key, if it exists."
+(cl-defgeneric llm-openai--headers (provider)
+  "Return the headers to use for a request from PROVIDER.")
+
+(cl-defmethod llm-openai--headers ((provider llm-openai))
   (when (llm-openai-key provider)
     `(("Authorization" . ,(format "Bearer %s" (llm-openai-key provider))))))
 
+(cl-defgeneric llm-openai--url (provider command)
+  "Return the URL for COMMAND for PROVIDER.")
+
 (cl-defmethod llm-openai--url ((_ llm-openai) command)
-  "Return the URL for COMMAND for PROVIDER."
   (concat "https://api.openai.com/v1/"; command))
 
 (cl-defmethod llm-openai--url ((provider llm-openai-compatible) command)
@@ -238,9 +243,7 @@ PROMPT is the prompt that needs to be updated with the response."
      :on-error (lambda (_ data)
                  (let ((errdata (cdr (assoc 'error data))))
                   (llm-request-plz-callback-in-buffer buf error-callback 'error
-                                                       (format "Problem calling Open AI: %s message: %s"
-                                                               (cdr (assoc 'type errdata))
-                                                               (cdr (assoc 'message errdata)))))))))
+                                                       (llm-openai--error-message data)))))))
 
 (cl-defmethod llm-chat ((provider llm-openai) prompt)
   (llm-openai--check-key provider)
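
Two things are worth noting in the llm-openai.el hunks: llm-openai--error-message now passes a plain string through untouched, so connection-level failures surfaced by llm-request (see the next file) render sensibly; and llm-openai--headers / llm-openai--url are now generics, so an Open AI-compatible backend can override just the request plumbing. A minimal sketch of that extension point; the struct and header name are invented for illustration:

;; Hypothetical provider that reuses the Open AI request code but signs
;; requests with a custom header instead of Bearer authorization.
(cl-defstruct (my-gateway-openai (:include llm-openai-compatible)))

(cl-defmethod llm-openai--headers ((provider my-gateway-openai))
  (when (llm-openai-key provider)
    `(("X-Api-Key" . ,(llm-openai-key provider)))))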
diff --git a/llm-request.el b/llm-request.el
index 949e91e0a9..6329e89664 100644
--- a/llm-request.el
+++ b/llm-request.el
@@ -149,13 +149,15 @@ the buffer is turned into JSON and passed to ON-SUCCESS."
             (lambda (_ on-success on-error)
               ;; No matter what, we need to stop listening for changes.
               (remove-hook 'after-change-functions #'llm-request--handle-new-content t)
-              (let ((code (url-http-parse-response)))
-                (if (eq code 200)
-                    (if on-success-raw
-                        (funcall on-success-raw (llm-request--content))
-                      (funcall on-success (json-read-from-string (llm-request--content))))
-                  (funcall on-error code (ignore-errors
-                                           (json-read-from-string (llm-request--content)))))))
+              (condition-case error
+                  (let ((code (url-http-parse-response)))
+                    (if (eq code 200)
+                        (if on-success-raw
+                            (funcall on-success-raw (llm-request--content))
+                          (funcall on-success (json-read-from-string (llm-request--content))))
+                      (funcall on-error code (ignore-errors
+                                               (json-read-from-string (llm-request--content))))))
+                (error (funcall on-error (car error) (error-message-string error)))))
             (list on-success on-error)
             t)))
       (when (and buffer on-partial)
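
With the condition-case above, a failure inside url-http-parse-response or the JSON parse no longer escapes: ON-ERROR now receives the error symbol and its message string, where previously it only ever saw an HTTP status code. A hedged sketch of a callback written for both shapes, based only on this hunk:

;; The first argument is an HTTP status (a number) on a non-200
;; response, or an error symbol such as `error' when parsing signals.
(defun my/on-error (code-or-symbol payload)
  "Hypothetical ON-ERROR callback tolerating both argument shapes."
  (if (numberp code-or-symbol)
      (message "HTTP %d: %S" code-or-symbol payload)
    (message "Request error %s: %s" code-or-symbol payload)))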
diff --git a/llm-tester.el b/llm-tester.el
index 402f416d6e..01f1ec1474 100644
--- a/llm-tester.el
+++ b/llm-tester.el
@@ -333,15 +333,50 @@ of by calling the `describe_function' function."
     (llm-tester-log "SUCCESS: Provider %s cancelled an async request" (type-of 
provider))
     (llm-cancel-request chat-async-request)))
 
-(defun llm-tester-all (provider &optional delay)
+(defun llm-tester-bad-provider (provider)
+  "When PROVIDER is bad in some way, test error handling."
+  (let ((error-callback
+         (lambda (type message)
+           (cond
+            ((not (symbolp type))
+             (llm-tester-log "ERROR: Provider %s returned an error with a non-symbol type %s with message %s" (type-of provider) type message))
+            ((not (stringp message))
+             (llm-tester-log "ERROR: Provider %s returned an error with a non-string message %s with type %s" (type-of provider) message type))
+            (t
+             (llm-tester-log "SUCCESS: Provider %s returned an error of type %s with message %s" (type-of provider) type message)))))
+        (success-callback
+         (lambda (_)
+           (llm-tester-log "ERROR: Provider %s returned a response when it should have been an error" (type-of provider)))))
+    (condition-case nil
+        (progn
+          (when (member 'embeddings (llm-capabilities provider))
+            (llm-tester-log "Testing bad provider %s for correct error handling for embeddings" provider)
+            (llm-embedding-async
+             provider "This is a test."
+             success-callback
+             error-callback))
+          (llm-tester-log "Testing bad provider %s for correct error handling for chat" provider)
+          (llm-chat-async
+           provider
+           (llm-make-simple-chat-prompt "This is a test")
+           success-callback
+           error-callback))
+      (error (llm-tester-log "ERROR: Provider %s threw an error when it should have been caught" (type-of provider))))))
+
+(defun llm-tester-all (provider &optional bad-variants delay)
   "Test all llm functionality for PROVIDER.
+
+BAD-VARIANTS is a list of providers that are expected to fail
+every call (embeddings, chat, etc.).
+
 DELAY is the number of seconds to wait between tests.  The
 default is 1.  Delays can help avoid rate limiting."
   (let ((separator (string-pad "" 30 ?=))
         (delay (or delay 1)))
     (llm-tester-log "\n%s\nTesting for %s\n%s\n"
                     separator (type-of provider) separator)
-    (when (member 'embedding (llm-capabilities provider))
+    (when (member 'embeddings (llm-capabilities provider))
       (llm-tester-embedding-sync provider)
       (sleep-for delay)
       (llm-tester-embedding-async provider)
@@ -371,6 +406,9 @@ default is 1.  Delays can help avoid rate limiting."
       (sleep-for delay)
       (llm-tester-function-calling-conversation-async provider)
       (sleep-for delay))
+    (dolist (bad-variant bad-variants)
+      (llm-tester-bad-provider bad-variant)
+      (sleep-for delay))
     (sleep-for 10)
     (llm-tester-log "%s\nEnd of testing for %s\n\n"
                     separator (type-of provider))))
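
A hedged usage sketch of the new BAD-VARIANTS argument; make-llm-openai's :key slot is inferred from the accessors in this commit, and the invalid key is invented:

;; Run the full suite against a working provider, plus one deliberately
;; misconfigured variant that should exercise only the error paths.
(llm-tester-all (make-llm-openai :key (getenv "OPENAI_API_KEY"))
                (list (make-llm-openai :key "not-a-real-key"))
                2)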
diff --git a/llm.el b/llm.el
index 677ad34e2e..2cfbb91aab 100644
--- a/llm.el
+++ b/llm.el
@@ -5,7 +5,7 @@
 ;; Author: Andrew Hyatt <ahyatt@gmail.com>
 ;; Homepage: https://github.com/ahyatt/llm
 ;; Package-Requires: ((emacs "28.1"))
-;; Package-Version: 0.10.0
+;; Package-Version: 0.12.1
 ;; SPDX-License-Identifier: GPL-3.0-or-later
 ;;
 ;; This program is free software; you can redistribute it and/or


