[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[elpa] externals/llm 9658faa37e 5/9: Make embedding-async callbacks also in original buffer
From: ELPA Syncer
Subject: [elpa] externals/llm 9658faa37e 5/9: Make embedding-async callbacks also in original buffer
Date: Thu, 26 Oct 2023 00:58:44 -0400 (EDT)
branch: externals/llm
commit 9658faa37e2f07a53961c8a87736a4d6f9054468
Author: Andrew Hyatt <ahyatt@gmail.com>
Commit: Andrew Hyatt <ahyatt@gmail.com>
Make embedding-async callbacks also in original buffer
This should finish all callback-in-buffer work.
---
llm-ollama.el | 9 ++++++---
llm-openai.el | 17 ++++++++++++-----
llm-vertex.el | 12 +++++++-----
3 files changed, 25 insertions(+), 13 deletions(-)
diff --git a/llm-ollama.el b/llm-ollama.el
index 7fd6afdec9..c004679b49 100644
--- a/llm-ollama.el
+++ b/llm-ollama.el
@@ -79,14 +79,17 @@ PROVIDER is the llm-ollama provider."
(assoc-default 'embedding response))
(cl-defmethod llm-embedding-async ((provider llm-ollama) string
vector-callback error-callback)
- (llm-request-async (llm-ollama--url provider "embeddings")
+ (let ((buf (current-buffer)))
+ (llm-request-async (llm-ollama--url provider "embeddings")
:data (llm-ollama--embedding-request provider string)
:on-success (lambda (data)
- (funcall vector-callback
(llm-ollama--embedding-extract-response data)))
+ (llm-request-callback-in-buffer
+ buf vector-callback
(llm-ollama--embedding-extract-response data)))
:on-error (lambda (_ _)
;; The problem with ollama is that it doesn't
;; seem to have an error response.
- (funcall error-callback 'error "Unknown error
calling ollama"))))
+ (llm-request-callback-in-buffer
+ buf error-callback 'error "Unknown error
calling ollama")))))
(cl-defmethod llm-embedding ((provider llm-ollama) string)
(llm-ollama--embedding-extract-response
diff --git a/llm-openai.el b/llm-openai.el
index 6165ad1af6..45b53e0e38 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -81,14 +81,17 @@ PROVIDER is the llm-openai provider."
(cl-defmethod llm-embedding-async ((provider llm-openai) string
vector-callback error-callback)
(unless (llm-openai-key provider)
(error "To call Open AI API, add a key to the `llm-openai' provider."))
- (llm-request-async "https://api.openai.com/v1/embeddings"
+ (let ((buf (current-buffer)))
+ (llm-request-async "https://api.openai.com/v1/embeddings"
:headers `(("Authorization" . ,(format "Bearer %s"
(llm-openai-key provider))))
:data (llm-openai--embedding-request provider string)
:on-success (lambda (data)
- (funcall vector-callback
(llm-openai--embedding-extract-response data)))
+ (llm-request-callback-in-buffer
+ buf vector-callback
(llm-openai--embedding-extract-response data)))
:on-error (lambda (_ data)
- (funcall error-callback 'error
- (llm-openai--error-message
data)))))
+ (llm-request-callback-in-buffer
+ buf error-callback 'error
+ (llm-openai--error-message data))))))
(cl-defmethod llm-embedding ((provider llm-openai) string)
(unless (llm-openai-key provider)
@@ -117,7 +120,11 @@ STREAMING if non-nil, turn on response streaming."
(car example)
(cdr example)))
(llm-chat-prompt-examples prompt) "\n"))))
- (when system-prompt
+ ;; Add the system prompt only if there is no existing one.
+ (when (and system-prompt
+ (not (cl-some (lambda (p)
+ (eq (llm-chat-prompt-interaction-role p)
'system))
+ (llm-chat-prompt-interactions prompt))))
(push (make-llm-chat-prompt-interaction :role 'system :content
system-prompt)
(llm-chat-prompt-interactions prompt)))
(when streaming (push `("stream" . ,t) request-alist))
diff --git a/llm-vertex.el b/llm-vertex.el
index a896cf7098..4ad59ff33e 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -108,15 +108,17 @@ KEY-GENTIME keeps track of when the key was generated,
because the key must be r
(cl-defmethod llm-embedding-async ((provider llm-vertex) string
vector-callback error-callback)
(llm-vertex-refresh-key provider)
- (llm-request-async (llm-vertex--embedding-url provider)
+ (let ((buf (current-buffer)))
+ (llm-request-async (llm-vertex--embedding-url provider)
:headers `(("Authorization" . ,(format "Bearer %s"
(llm-vertex-key provider))))
:data `(("instances" . [(("content" . ,string))]))
:on-success (lambda (data)
- (funcall vector-callback
(llm-vertex--embedding-extract-response data)))
+ (llm-request-callback-in-buffer
+ buf vector-callback
(llm-vertex--embedding-extract-response data)))
:on-error (lambda (_ data)
- (funcall error-callback
- 'error
- (llm-vertex--error-message data)))))
+ (llm-request-callback-in-buffer
+ buf error-callback
+ 'error (llm-vertex--error-message data))))))
(cl-defmethod llm-embedding ((provider llm-vertex) string)
(llm-vertex-refresh-key provider)
- [elpa] externals/llm updated (b69b6e8480 -> 53b5ebcbdb), ELPA Syncer, 2023/10/26
- [elpa] externals/llm 92914e3304 1/9: Improve how conversations work and make it easier to handle them, ELPA Syncer, 2023/10/26
- [elpa] externals/llm 50ad3cbe4c 2/9: Fix issue with never correctly storing or applying the context, ELPA Syncer, 2023/10/26
- [elpa] externals/llm 59fc3d7d29 3/9: Ensure that all callbacks are in the original buffer, ELPA Syncer, 2023/10/26
- [elpa] externals/llm 16335ca7cd 4/9: Merge branch 'conversation-fix', ELPA Syncer, 2023/10/26
- [elpa] externals/llm 9658faa37e 5/9: Make embedding-async callbacks also in original buffer, ELPA Syncer <=
- [elpa] externals/llm 0af6350d10 7/9: Add to README info about callbacks in buffer, fix convo example, ELPA Syncer, 2023/10/26
- [elpa] externals/llm 2daffebdee 8/9: Properly throw errors when sync requests receive an error code, ELPA Syncer, 2023/10/26
- [elpa] externals/llm 53b5ebcbdb 9/9: Add new provider GPT4All, ELPA Syncer, 2023/10/26
- [elpa] externals/llm ee50f9cd9f 6/9: Delete unused testing code, run conversation testers llm-tester-all, ELPA Syncer, 2023/10/26