KTextAddons

ollamaplugin.cpp
1/*
2 SPDX-FileCopyrightText: 2025 Laurent Montel <montel@kde.org>
3
4 SPDX-License-Identifier: GPL-2.0-or-later
5*/
6
7#include "ollamaplugin.h"
8#include "autogeneratetext_ollama_debug.h"
9#include "core/textautogeneratechatmodel.h"
10#include "core/textautogeneratemanager.h"
11#include "ollamamanager.h"
12#include "ollamasettings.h"
13
14OllamaPlugin::OllamaPlugin(QObject *parent)
15 : TextAutogenerateText::TextAutogenerateTextPlugin{parent}
16{
17 if (!loadSettings()) {
18 qCWarning(AUTOGENERATETEXT_OLLAMA_LOG) << "Impossible to load settings";
19 return;
20 }
21
22 connect(OllamaManager::self(), &OllamaManager::modelsLoadDone, this, [this](const OllamaManager::ModelsInfo &modelinfo) {
23 if (modelinfo.hasError) {
24 setReady(false);
25 Q_EMIT errorOccurred(modelinfo.errorOccured);
26 } else {
27 setReady(true);
28 }
29 });
30 OllamaManager::self()->loadModels();
31}
32
33OllamaPlugin::~OllamaPlugin() = default;
34
35bool OllamaPlugin::loadSettings()
36{
37 setCurrentModel(OllamaSettings::model());
38 // TODO verify that server is ok.
39 return true;
40}
41
// Resets the plugin's conversation state. Not implemented yet.
void OllamaPlugin::clear()
{
    // TODO clear all threads
}
46
// Returns the generated text. Not implemented yet: no result is stored on
// this class, so an empty string is returned unconditionally.
QString OllamaPlugin::result()
{
    // TODO
    return {};
}
52
53void OllamaPlugin::setPrompt(const QString &text)
54{
55 // TODO
56}
57
// Name of the Ollama model currently selected for requests
// (initialised from OllamaSettings::model() in loadSettings()).
QString OllamaPlugin::currentModel() const
{
    return mCurrentModel;
}
62
// Plain setter: no change notification is emitted and the value is not
// written back to OllamaSettings here.
void OllamaPlugin::setCurrentModel(const QString &newCurrentModel)
{
    mCurrentModel = newCurrentModel;
}
67
// Aborts the current generation. Not implemented yet.
// TODO(review): presumably this should cancel the in-flight OllamaReply
// created by sendToLLM() — confirm intended semantics.
void OllamaPlugin::stop()
{
    // TODO
}
72
// Sends @p message to the Ollama server as a completion request for the
// currently selected model, and streams the reply into the last message of
// the shared chat model.
void OllamaPlugin::sendToLLM(const QString &message)
{
    OllamaRequest req;
    req.setMessage(message);
    req.setModel(mCurrentModel);
    // Disabled: re-attach the context of the most recent LLM answer so the
    // conversation keeps continuity across requests.
    /*
    for (const auto &msg : m_messages | std::views::reverse) {
        if (msg.sender == Sender::LLM) {
            req.setContext(message.context);
            break;
        }
    }
    */
    auto reply = OllamaManager::self()->getCompletion(req);

    // Streaming update: each chunk replaces the content of the chat model's
    // last message with the full response accumulated so far.
    // NOTE(review): this assumes the last message still belongs to this
    // request — with overlapping sendToLLM() calls two replies would write
    // into the same slot; confirm requests are serialized by the caller.
    mConnections.insert(reply, connect(reply, &OllamaReply::contentAdded, this, [reply]() {
        auto message = TextAutogenerateText::TextAutogenerateManager::self()->textAutoGenerateChatModel()->lastMessage();
        message.setContent(reply->readResponse());
        TextAutogenerateText::TextAutogenerateManager::self()->textAutoGenerateChatModel()->replaceLastMessage(message);
    }));
    // NOTE(review): inserting twice with the same `reply` key overwrites the
    // first connection handle if mConnections is a plain QHash — confirm it
    // is a QMultiHash, or that losing the contentAdded handle is acceptable
    // (deleteLater() below tears down all of the reply's connections anyway).
    mConnections.insert(reply, connect(reply, &OllamaReply::finished, this, [reply, this] {
        auto message = TextAutogenerateText::TextAutogenerateManager::self()->textAutoGenerateChatModel()->lastMessage();
        mConnections.remove(reply);
        reply->deleteLater();
        // Mark the streamed message as complete before notifying listeners.
        message.setInProgress(false);
#if 0
        message.context = message.llmReply->context();
        message.info = message.llmReply->info();
#endif
        Q_EMIT finished(message); // TODO add message as argument ???
    }));
}
105
106#include "moc_ollamaplugin.cpp"
void finished()
Emitted when the LLM has finished returning its response.
void contentAdded()
Emitted when new content has been added to the response.
Q_EMITQ_EMIT
QMetaObject::Connection connect(const QObject *sender, PointerToMemberFunction signal, Functor functor)
QFuture< ArgsType< Signal > > connect(Sender *sender, Signal signal)
This file is part of the KDE documentation.
Documentation copyright © 1996-2025 The KDE developers.
Generated on Fri Apr 18 2025 12:00:52 by doxygen 1.13.2 written by Dimitri van Heesch, © 1997-2006

KDE's Doxygen guidelines are available online.