Mon, 26 Aug 2024 16:01:11 +0200
Implemented the 'Remove Model' menu action.
diff -r 3118d16e526e -r c471738b75b3 OllamaInterface/OllamaClient.py
--- a/OllamaInterface/OllamaClient.py	Sun Aug 25 19:44:24 2024 +0200
+++ b/OllamaInterface/OllamaClient.py	Mon Aug 26 16:01:11 2024 +0200
@@ -206,12 +206,14 @@
 
         @param model name of the model
         @type str
+        @return flag indicating success
+        @rtype bool
         """
-        # TODO: not implemented yet
         ollamaRequest = {
-            "name": model,
+            "model": model,
         }
-        self.__sendRequest("delete", data=ollamaRequest)
+        _, status = self.__sendSyncRequest("delete", data=ollamaRequest, delete=True)
+        return status == 200  # HTTP status 200 OK
 
     def list(self):
         """
@@ -232,15 +234,18 @@
             for model in response["models"]:
                 name = model["name"]
                 if name:
-                    models.append(name.replace(":latest", ""))
+                    models.append(name)
 
         self.modelsList.emit(models)
 
     def listDetails(self):
         """
         Public method to request a list of models available locally from the
         'ollama' server with some model details.
+
+        @return list of dictionaries containing the available models and related data
+        @rtype list[dict[str, Any]]
         """
-        response = self.__sendSyncRequest("tags")
+        response, _ = self.__sendSyncRequest("tags")
 
         models = []
         if response is not None:
@@ -264,8 +269,11 @@
     def listRunning(self):
         """
         Public method to request a list of running models from the 'ollama' server.
+
+        @return list of dictionaries containing the running models and related data
+        @rtype list[dict[str, Any]]
         """
-        response = self.__sendSyncRequest("ps")
+        response, _ = self.__sendSyncRequest("ps")
 
         models = []
         if response is not None:
@@ -325,7 +333,7 @@
         """
         return self.__state
 
-    def __getServerReply(self, endpoint, data=None):
+    def __getServerReply(self, endpoint, data=None, delete=False):
         """
         Private method to send a request to the 'ollama' server and return a reply
         object.
@@ -335,10 +343,12 @@
         @param data dictionary containing the data to send to the server
             (defaults to None)
         @type dict (optional)
+        @param delete flag indicating to send a delete request (defaults to False)
+        @type bool (optional)
         @return 'ollama' server reply
         @rtype QNetworkReply
         """
-        ollamaUrl = QUrl(
+        ollamaUrl = QUrl(
             "{0}://{1}:{2}/api/{3}".format(
                 self.__plugin.getPreferences("OllamaScheme"),
                 (
                     "127.0.0.1"
                     if self.__localServer
                     else self.__plugin.getPreferences("OllamaHost")
                 ),
@@ -360,7 +370,12 @@
                 QNetworkRequest.KnownHeaders.ContentTypeHeader, "application/json"
             )
             jsonData = json.dumps(data).encode("utf-8")
-            reply = self.__networkManager.post(request, jsonData)
+            if delete:
+                reply = self.__networkManager.sendCustomRequest(
+                    request, b"DELETE", jsonData
+                )
+            else:
+                reply = self.__networkManager.post(request, jsonData)
         else:
             reply = self.__networkManager.get(request)
         reply.errorOccurred.connect(lambda error: self.__errorOccurred(error, reply))
@@ -381,35 +396,8 @@
         """
         self.__state = OllamaClientState.Requesting
 
-        ##ollamaUrl = QUrl(
-        ##"{0}://{1}:{2}/api/{3}".format(
-        ##self.__plugin.getPreferences("OllamaScheme"),
-        ##(
-        ##"127.0.0.1"
-        ##if self.__localServer
-        ##else self.__plugin.getPreferences("OllamaHost")
-        ##),
-        ##(
-        ##self.__plugin.getPreferences("OllamaLocalPort")
-        ##if self.__localServer
-        ##else self.__plugin.getPreferences("OllamaPort")
-        ##),
-        ##endpoint,
-        ##)
-        ##)
-        ##request = QNetworkRequest(ollamaUrl)
-        ##if data is not None:
-        ##request.setHeader(
-        ##QNetworkRequest.KnownHeaders.ContentTypeHeader, "application/json"
-        ##)
-        ##jsonData = json.dumps(data).encode("utf-8")
-        ##reply = self.__networkManager.post(request, jsonData)
-        ##else:
-        ##reply = self.__networkManager.get(request)
-##
         reply = self.__getServerReply(endpoint=endpoint, data=data)
         reply.finished.connect(lambda: self.__replyFinished(reply))
-        ##reply.errorOccurred.connect(lambda error: self.__errorOccurred(error, reply))
         reply.readyRead.connect(lambda: self.__processData(reply, processResponse))
 
         self.__replies.append(reply)
@@ -462,7 +450,7 @@
         if data and processResponse:
             processResponse(data)
 
-    def __sendSyncRequest(self, endpoint, data=None):
+    def __sendSyncRequest(self, endpoint, data=None, delete=False):
         """
         Private method to send a request to the 'ollama' server and handle its
         responses.
@@ -472,10 +460,15 @@
         @param data dictionary containing the data to send to the server
             (defaults to None)
         @type dict (optional)
+        @param delete flag indicating to send a delete request (defaults to False)
+        @type bool (optional)
+        @return tuple containing the data sent by the 'ollama' server and the HTTP
+            status code
+        @rtype tuple of (Any, int)
         """
         self.__state = OllamaClientState.Requesting
 
-        reply = self.__getServerReply(endpoint=endpoint, data=data)
+        reply = self.__getServerReply(endpoint=endpoint, data=data, delete=delete)
         while not reply.isFinished():
             QCoreApplication.processEvents()
             QThread.msleep(100)
@@ -484,13 +477,15 @@
 
         self.__state = OllamaClientState.Finished
 
+        statusCode = reply.attribute(QNetworkRequest.Attribute.HttpStatusCodeAttribute)
+
         if reply.error() == QNetworkReply.NetworkError.NoError:
             buffer = bytes(reply.readAll())
             with contextlib.suppress(json.JSONDecodeError):
                 data = json.loads(buffer)
-                return data
+                return data, statusCode
 
-        return None, statusCode
+        return None, statusCode
 
     def heartbeat(self):
         """
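For context, the new client code follows the public 'ollama' REST API: a model is deleted with an HTTP DELETE request to the /api/delete endpoint carrying a JSON body that names the model, and the server answers with HTTP status 200 on success. The following standalone sketch shows the same exchange without Qt; the default local address 127.0.0.1:11434 is an assumption for illustration, not read from the plug-in preferences:

    import json
    import urllib.error
    import urllib.request


    def remove_model(model, host="127.0.0.1", port=11434):
        """Ask an 'ollama' server to delete a model; return True on HTTP 200.

        Mirrors what OllamaClient.remove() does via Qt: send the model name
        in a JSON body with an HTTP DELETE request and treat status 200 as
        success.
        """
        request = urllib.request.Request(
            f"http://{host}:{port}/api/delete",
            data=json.dumps({"model": model}).encode("utf-8"),
            headers={"Content-Type": "application/json"},
            method="DELETE",
        )
        try:
            with urllib.request.urlopen(request) as reply:
                return reply.status == 200
        except urllib.error.HTTPError:
            return False  # e.g. HTTP 404 if the model is unknown to the server

On the Qt side the request goes through QNetworkAccessManager.sendCustomRequest() with the verb b"DELETE", as in the diff above, because the deleteResource() convenience method does not accept a request body.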
diff -r 3118d16e526e -r c471738b75b3 OllamaInterface/OllamaDetailedModelsDialog.py
--- a/OllamaInterface/OllamaDetailedModelsDialog.py	Sun Aug 25 19:44:24 2024 +0200
+++ b/OllamaInterface/OllamaDetailedModelsDialog.py	Mon Aug 26 16:01:11 2024 +0200
@@ -39,7 +39,7 @@
                     model["id"],
                     Globals.dataString(model["size"]),
                     model["modified"].strftime("%Y-%m-%d %H:%M:%S"),
-                ]
+                ],
             )
 
         for column in range(self.modelsList.columnCount()):
diff -r 3118d16e526e -r c471738b75b3 OllamaInterface/OllamaRunningModelsDialog.py
--- a/OllamaInterface/OllamaRunningModelsDialog.py	Sun Aug 25 19:44:24 2024 +0200
+++ b/OllamaInterface/OllamaRunningModelsDialog.py	Mon Aug 26 16:01:11 2024 +0200
@@ -40,7 +40,7 @@
                     Globals.dataString(model["size"]),
                     model["processor"],
                     model["expires"].strftime("%Y-%m-%d %H:%M:%S"),
-                ]
+                ],
             )
 
         for column in range(self.modelsList.columnCount()):
diff -r 3118d16e526e -r c471738b75b3 OllamaInterface/OllamaWidget.py
--- a/OllamaInterface/OllamaWidget.py	Sun Aug 25 19:44:24 2024 +0200
+++ b/OllamaInterface/OllamaWidget.py	Mon Aug 26 16:01:11 2024 +0200
@@ -9,7 +9,7 @@
 import json
 import os
 
-from PyQt6.QtCore import QProcess, QProcessEnvironment, Qt, QTimer, pyqtSlot, QUrl
+from PyQt6.QtCore import QProcess, QProcessEnvironment, Qt, QTimer, QUrl, pyqtSlot
 from PyQt6.QtGui import QDesktopServices
 from PyQt6.QtWidgets import (
     QDialog,
@@ -88,6 +88,8 @@
         self.__localServerDialog = None
         self.__localServerProcess = None
 
+        self.__availableModels = []
+
         self.__connectClient()
 
         self.__initOllamaMenu()
@@ -174,10 +176,14 @@
         @param modelNames list of model names
         @type list[str]
         """
+        self.__availableModels = modelNames[:]
+
         self.modelComboBox.clear()
 
         self.modelComboBox.addItem("")
-        self.modelComboBox.addItems(sorted(modelNames))
+        self.modelComboBox.addItems(
+            sorted(n.replace(":latest", "") for n in modelNames)
+        )
 
     @pyqtSlot(list)
     def __checkHistoryModels(self, modelNames):
@@ -188,10 +194,9 @@
         @param modelNames list of model names
         @type list[str]
         """
+        names = [n.replace(":latest", "") for n in modelNames]
         for index in range(self.__chatHistoryLayout.count() - 1):
-            self.__chatHistoryLayout.itemAt(index).widget().checkModelAvailable(
-                modelNames
-            )
+            self.__chatHistoryLayout.itemAt(index).widget().checkModelAvailable(names)
 
     ############################################################################
     ## Methods handling signals from the chat history widgets.
@@ -601,7 +606,6 @@
         """
         # TODO: implement the menu and menu methods
         # * Pull Model
-        # * Remove Model
         ###################################################################
         ## Menu with Chat History related actions
         ###################################################################
@@ -631,7 +635,9 @@
         )
         self.__modelMenu.addSeparator()
         self.__modelMenu.addAction(self.tr("Download Model"), self.__pullModel)
-        self.__modelMenu.addAction(self.tr("Remove Model"), self.__removeModel)
+        self.__removeModelAct = self.__modelMenu.addAction(
+            self.tr("Remove Model"), self.__removeModel
+        )
         ###################################################################
         ## Menu with Local Server related actions
         ###################################################################
@@ -683,6 +689,8 @@
             self.__localServerProcess is not None and self.__localServerDialog is None
         )
 
+        self.__removeModelAct.setEnabled(bool(self.__availableModels))
+
     @pyqtSlot()
     def __ollamaConfigure(self):
         """
@@ -928,5 +936,32 @@
         """
         Private slot to remove a model from the 'ollama' server.
         """
-        # TODO: not implemented yet
-        pass
+        if self.__availableModels:
+            modelName, ok = QInputDialog.getItem(
+                self,
+                self.tr("Remove Model"),
+                self.tr("Select the model to be removed by the 'ollama' server:"),
+                [""] + sorted(self.__availableModels),
+                0,
+                False,
+            )
+            if ok and modelName:
+                deleted = self.__client.remove(modelName)
+                if deleted:
+                    EricMessageBox.information(
+                        self,
+                        self.tr("Remove Model"),
+                        self.tr(
+                            "<p>The model <b>{0}</b> was deleted successfully.</p>"
+                        ).format(modelName),
+                    )
+                    self.__client.list()  # reload the list of models
+                else:
+                    EricMessageBox.warning(
+                        self,
+                        self.tr("Remove Model"),
+                        self.tr(
+                            "<p>The model <b>{0}</b> could not be removed from the"
+                            " 'ollama' server.</p>"
+                        ).format(modelName),
+                    )
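A note on the model name handling above: OllamaClient.list() now emits the full model names (including the ':latest' tag) so that __removeModel() can hand the exact server-reported name to remove(); the widget strips the tag only where names are shown to the user. A minimal sketch of that split (the helper name is illustrative, not part of the plug-in):

    def displayModelName(modelName):
        """Return the user-visible form of a full 'ollama' model name.

        The full name (e.g. 'llama3:latest') is what the server reports and
        what the plug-in now passes to the delete request; only the UI drops
        the implicit ':latest' tag.
        """
        return modelName.replace(":latest", "")


    # The model combo box shows 'llama3' while __availableModels keeps
    # 'llama3:latest' for use by the 'Remove Model' action.
    assert displayModelName("llama3:latest") == "llama3"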