[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[elpa] externals/llm 609c8a6834 48/71: Fix error message handling
From: ELPA Syncer
Subject: [elpa] externals/llm 609c8a6834 48/71: Fix error message handling
Date: Fri, 17 May 2024 00:58:48 -0400 (EDT)
branch: externals/llm
commit 609c8a683446f348f7b65d282f420a27550bd4a5
Author: Andrew Hyatt <ahyatt@gmail.com>
Commit: Andrew Hyatt <ahyatt@gmail.com>
Fix error message handling
The first argument to error callbacks should always be a symbol. The second
could be a string in many cases, but often it is parsed json which needs the
error message extracted.
---
llm-ollama.el | 25 +++++++++++++------------
llm-openai.el | 13 ++++---------
llm-request-plz.el | 21 ++++++++-------------
3 files changed, 25 insertions(+), 34 deletions(-)
diff --git a/llm-ollama.el b/llm-ollama.el
index e6dd01b305..f3d7ff4d72 100644
--- a/llm-ollama.el
+++ b/llm-ollama.el
@@ -82,6 +82,10 @@ PROVIDER is the llm-ollama provider."
"Return the embedding from the server RESPONSE."
(assoc-default 'embedding response))
+(defun llm-ollama--error-message (data)
+ "Return the error message from DATA."
+ (if (stringp data) data (assoc-default 'error data)))
+
(cl-defmethod llm-embedding-async ((provider llm-ollama) string
vector-callback error-callback)
(let ((buf (current-buffer)))
(llm-request-plz-async (llm-ollama--url provider "embeddings")
@@ -89,11 +93,10 @@ PROVIDER is the llm-ollama provider."
:on-success (lambda (data)
(llm-request-callback-in-buffer
buf vector-callback
(llm-ollama--embedding-extract-response data)))
-                         :on-error (lambda (_ _)
-                                     ;; The problem with ollama is that it doesn't
-                                     ;; seem to have an error response.
+                         :on-error (lambda (type err)
                                      (llm-request-callback-in-buffer
-                                      buf error-callback 'error "Unknown error calling ollama")))))
+                                      buf error-callback type
+                                      (llm-ollama--error-message))))))
(cl-defmethod llm-embedding ((provider llm-ollama) string)
(llm-ollama--embedding-extract-response
@@ -153,10 +156,9 @@ STREAMING is a boolean to control whether to stream the
response."
(llm-provider-utils-append-to-prompt prompt response)
(llm-request-plz-callback-in-buffer buf response-callback
response)))
                  :on-error (lambda (code data)
-                             (let ((error-message (cdr (assoc 'error data))))
-                               (llm-request-plz-callback-in-buffer buf error-callback 'error
-                                                                   (format "Problem calling Ollama: %s message: %s"
-                                                                           code error-message)))))))
+                             (llm-request-plz-callback-in-buffer
+                              buf error-callback 'error
+                              (llm-ollama--error-message data))))))
(cl-defmethod llm-chat-streaming ((provider llm-ollama) prompt
partial-callback response-callback error-callback)
(let ((buf (current-buffer))
@@ -173,10 +175,9 @@ STREAMING is a boolean to control whether to stream the
response."
(when-let ((response (llm-ollama--get-response data)))
(setq response-text (concat response-text response))
(llm-request-callback-in-buffer buf partial-callback
response-text)))
-     :on-error (lambda (_ _)
-                 ;; The problem with ollama is that it doesn't
-                 ;; seem to have an error response.
-                 (llm-request-callback-in-buffer buf error-callback 'error "Unknown error calling ollama")))))
+     :on-error (lambda (type msg)
+                 (llm-request-callback-in-buffer buf error-callback type
+                                                 (llm-ollama--error-message msg))))))
(cl-defmethod llm-name ((provider llm-ollama))
(llm-ollama-chat-model provider))
diff --git a/llm-openai.el b/llm-openai.el
index 610c6eba75..7ee30a24b7 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -241,9 +241,8 @@ PROMPT is the prompt that needs to be updated with the
response."
(llm-openai--process-and-return
provider prompt data error-callback)))
      :on-error (lambda (_ data)
-                 (let ((errdata (cdr (assoc 'error data))))
-                   (llm-request-plz-callback-in-buffer buf error-callback 'error
-                                                       (llm-openai--error-message data)))))))
+                 (llm-request-plz-callback-in-buffer buf error-callback 'error
+                                                     (llm-openai--error-message data))))))
(cl-defmethod llm-chat ((provider llm-openai) prompt)
(llm-openai--check-key provider)
@@ -317,13 +316,9 @@ RESPONSE can be nil if the response is complete."
(llm-request-plz-callback-in-buffer
buf error-callback 'error data))))
:on-error (lambda (_ data)
- (let ((errdata
- (cdr (assoc 'error data))))
- (llm-request-plz-callback-in-buffer
+ (llm-request-plz-callback-in-buffer
buf error-callback 'error
- (format "Problem calling Open AI: %s message: %s"
- (cdr (assoc 'type errdata))
- (cdr (assoc 'message errdata))))))
+ (llm-openai--error-message data)))
:on-success (lambda (_)
(llm-request-plz-callback-in-buffer
buf
diff --git a/llm-request-plz.el b/llm-request-plz.el
index fa7a9e5284..8d7b9a94cd 100644
--- a/llm-request-plz.el
+++ b/llm-request-plz.el
@@ -94,28 +94,23 @@ TIMEOUT is the number of seconds to wait for a response."
:timeout timeout))
(defun llm-request-plz--handle-error (error on-error)
- "Handle the ERROR with the ON-ERROR callback.
-
-For HTTP errors, ON-ERROR will be called with the HTTP status
-code and the HTTP body of the error response.
-
-For Curl errors, ON-ERROR will be called with the exit code of
-the curl process and an error message."
+ "Handle the ERROR with the ON-ERROR callback."
(cond ((plz-media-type-filter-error-p error)
(let ((cause (plz-media-type-filter-error-cause error))
(response (plz-error-response error)))
- ;; TODO: What do we want to pass to callers here?
- (funcall on-error 'filter-error cause)))
+           (funcall on-error 'error
+                    (format "Error with cause: %s, response: %s" cause response))))
((plz-error-curl-error error)
(let ((curl-error (plz-error-curl-error error)))
- (funcall on-error
- (car curl-error)
- (cdr curl-error))))
+ (funcall on-error 'error
+ (format "curl error code %d: %s"
+ (car curl-error)
+ (cdr curl-error)))))
((plz-error-response error)
(when-let ((response (plz-error-response error))
(status (plz-response-status response))
(body (plz-response-body response)))
- (funcall on-error status body)))
+ (funcall on-error 'error body)))
(t (user-error "Unexpected error: %s" error))))
(cl-defun llm-request-plz-async (url &key headers data on-success media-type
- [elpa] externals/llm 99814b533d 28/71: Changes to get at least some of Gemini / Vertex working, (continued)
- [elpa] externals/llm 99814b533d 28/71: Changes to get at least some of Gemini / Vertex working, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 7919563997 30/71: Merge pull request #31 from r0man/plz, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 40909718cc 31/71: Fix function calling for Gemini & Vertex, and streaming for Gemini, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 9c07e22659 33/71: Kill the response buffer when the request completed, ELPA Syncer, 2024/05/17
- [elpa] externals/llm a61eb88c53 34/71: Merge pull request #33 from r0man/plz, ELPA Syncer, 2024/05/17
- [elpa] externals/llm fa6c445271 35/71: Fix issue with not handline the plz response correctly for streaming, ELPA Syncer, 2024/05/17
- [elpa] externals/llm f42e00265f 38/71: Add more documentation, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 2426f827d7 39/71: Add process filter error handling, ELPA Syncer, 2024/05/17
- [elpa] externals/llm ffbaa5e683 40/71: Restore error callbacks, ELPA Syncer, 2024/05/17
- [elpa] externals/llm f91d965490 42/71: Add a clause to handle process filter errors, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 609c8a6834 48/71: Fix error message handling, ELPA Syncer <=
- [elpa] externals/llm f409841da3 49/71: Add ability to cancel processes, which is what plz returns, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 1a40b6d656 51/71: Really set reponse body to nil, ELPA Syncer, 2024/05/17
- [elpa] externals/llm c10187a07a 53/71: Set llm-request-plz-timeout to nil, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 0cb682aff4 54/71: Merge pull request #39 from r0man/plz, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 006cd1d6b2 59/71: Handle safety related errors in Vertex / Gemini, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 8a384aa073 61/71: Run handler code via a timer, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 95b9b8b515 68/71: Improve error handling in Vertex and Ollama, ELPA Syncer, 2024/05/17
- [elpa] externals/llm ece9b1fc82 69/71: Remove llm-request and fix requires, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 7f73a59fd1 12/71: Remove media-types from llm-request-plz-async, ELPA Syncer, 2024/05/17
- [elpa] externals/llm f41c55a44e 15/71: Switch Open AI completely to plz, fix async request method, ELPA Syncer, 2024/05/17