Mirror of https://github.com/PenumbraOS/mabl.git (synced 2026-02-04 02:08:57 -06:00)

Commit 7be80e4230 (parent 1aa887e0f6)
Optionally use native Gemini client with Google Search

.idea/runConfigurations/mabl.xml (generated; 3 lines changed)
@@ -58,7 +58,7 @@
       <option name="ADVANCED_PROFILING_ENABLED" value="false" />
       <option name="STARTUP_PROFILING_ENABLED" value="false" />
       <option name="STARTUP_CPU_PROFILING_ENABLED" value="false" />
-      <option name="STARTUP_CPU_PROFILING_CONFIGURATION_NAME" value="Java/Kotlin Method Sample (legacy)" />
+      <option name="STARTUP_CPU_PROFILING_CONFIGURATION_NAME" value="System Trace" />
       <option name="STARTUP_NATIVE_MEMORY_PROFILING_ENABLED" value="false" />
       <option name="NATIVE_MEMORY_SAMPLE_RATE_BYTES" value="2048" />
     </Profilers>
@@ -69,7 +69,6 @@
     <option name="SKIP_ACTIVITY_VALIDATION" value="false" />
     <method v="2">
       <option name="Gradle.BeforeRunTask" enabled="false" tasks="installDemoPlugins" externalProjectPath="$PROJECT_DIR$" vmOptions="" scriptParameters="" />
-      <option name="Gradle.BeforeRunTask" enabled="false" tasks="installOpenAiPlugin" externalProjectPath="$PROJECT_DIR$" vmOptions="" scriptParameters="" />
       <option name="Android.Gradle.BeforeRunTask" enabled="true" />
     </method>
   </configuration>
@@ -16,7 +16,7 @@ penumbraos-sdk = "e1a344b"
 penumbraos-sdk-local = "0.1.0"
 moonlight-ui = "6ae1d5e"
 moonlight-ui-local = "0.1.0"
-langchain4j = "961b2df"
+langchain4j = "67188d8"
 ktor-client = "3.0.0"
 kotlinx-serialization = "1.7.1"
 kotlinx-coroutines = "1.8.1"
@@ -61,6 +61,7 @@ penumbraos-sdk = { group = "com.github.PenumbraOS", name = "sdk", version.ref =
 moonlight-ui = { group = "com.github.agg23", name = "moonlight", version.ref = "moonlight-ui" }
 #moonlight-ui = { group = "com.open.pin", name = "ui", version.ref = "moonlight-ui-local" }
 langchain4j = { group = "com.github.agg23.langchain4j", name = "langchain4j-bom", version.ref = "langchain4j" }
+langchain4j-gemini = { group = "com.github.agg23.langchain4j", name = "langchain4j-google-ai-gemini", version.ref = "langchain4j" }
 langchain4j-openai = { group = "com.github.agg23.langchain4j", name = "langchain4j-open-ai", version.ref = "langchain4j" }
 langchain4j-kotlin = { group = "com.github.agg23.langchain4j", name = "langchain4j-kotlin", version.ref = "langchain4j" }
 ktor-client-android = { group = "io.ktor", name = "ktor-client-android", version.ref = "ktor-client" }
@@ -84,6 +84,7 @@ dependencies {
     implementation(libs.langchain4j.kotlin)
     implementation(libs.langchain4j)
    implementation(libs.langchain4j.openai)
+    implementation(libs.langchain4j.gemini)

     implementation(libs.androidx.camera.core)
     implementation(libs.androidx.camera.lifecycle)
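Note on the catalog entries above: the langchain4j libraries resolve from the com.github.agg23.langchain4j group pinned to a short commit hash ("67188d8"), which is the JitPack coordinate convention for GitHub forks. A minimal sketch, assuming JitPack is indeed the source (the repository declaration is not part of this diff), of the settings.gradle.kts entry that makes such coordinates resolvable:

    // settings.gradle.kts (sketch; not from this commit)
    dependencyResolutionManagement {
        repositories {
            google()
            mavenCentral()
            // JitPack builds the referenced GitHub commit on demand, so the
            // version "67188d8" selects that exact commit of the fork.
            maven("https://jitpack.io")
        }
    }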
@@ -31,6 +31,7 @@ import dev.langchain4j.model.chat.StreamingChatModel
 import dev.langchain4j.model.chat.request.ChatRequestParameters
 import dev.langchain4j.model.chat.request.json.JsonObjectSchema
 import dev.langchain4j.model.chat.response.ChatResponse
+import dev.langchain4j.model.googleai.GoogleAiGeminiStreamingChatModel
 import dev.langchain4j.model.openai.OpenAiStreamingChatModel
 import kotlinx.coroutines.CoroutineScope
 import kotlinx.coroutines.Dispatchers
@@ -78,28 +79,44 @@ class LangchainLlmService : MablService("LangchainLlmService") {
                 Log.e(TAG, "Failed to load LLM configuration", e)
             }

-            if (currentConfig == null) {
+            val config = currentConfig
+
+            if (config == null) {
                 Log.e(TAG, "No valid LLM configuration found")
                 return@launch
             }

             try {
-                Log.d(TAG, "About to create OpenAI client")
-                val apiKey = currentConfig!!.apiKey
-                val baseUrl = currentConfig!!.baseUrl
+                Log.d(TAG, "About to create Langchain client")
+                model = when (config) {
+                    is LlmConfiguration.Gemini -> {
+                        GoogleAiGeminiStreamingChatModel.builder()
+                            .allowGoogleSearch(true)
+                            .allowGoogleMaps(true)
+                            .httpClientBuilder(KtorHttpClientBuilder(llmScope, client))
+                            .apiKey(config.apiKey)
+                            .modelName(config.model)
+                            .temperature(config.temperature)
+                            .maxOutputTokens(config.maxTokens).build()
+                    }

-                model = OpenAiStreamingChatModel.builder()
-                    .httpClientBuilder(KtorHttpClientBuilder(llmScope, client))
-                    .baseUrl(baseUrl).apiKey(apiKey)
-                    .modelName(currentConfig!!.model).temperature(currentConfig!!.temperature)
-                    .maxTokens(currentConfig!!.maxTokens).build()
+                    is LlmConfiguration.OpenAI -> {
+                        OpenAiStreamingChatModel.builder()
+                            .httpClientBuilder(KtorHttpClientBuilder(llmScope, client))
+                            .baseUrl(config.baseUrl)
+                            .apiKey(config.apiKey)
+                            .modelName(config.model)
+                            .temperature(config.temperature)
+                            .maxTokens(config.maxTokens).build()
+                    }
+                }

                 Log.w(
                     TAG,
-                    "OpenAI client initialized successfully with model: ${currentConfig!!.model}"
+                    "${config.type} client initialized successfully with model: ${config.model}"
                 )
             } catch (e: Exception) {
-                Log.e(TAG, "Failed to initialize OpenAI client", e)
+                Log.e(TAG, "Failed to initialize Langchain client", e)
             }
         }
     }
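Both branches of the when above produce a StreamingChatModel, so the rest of the service stays provider-agnostic; only the Gemini branch opts into Google Search and Maps grounding via allowGoogleSearch/allowGoogleMaps. A minimal consumption sketch (not from this commit), assuming the upstream langchain4j 1.x streaming API that the imports suggest — the pinned agg23 fork may differ:

    import dev.langchain4j.model.chat.StreamingChatModel
    import dev.langchain4j.model.chat.response.ChatResponse
    import dev.langchain4j.model.chat.response.StreamingChatResponseHandler

    // Streams one prompt through whichever model the when-branch built.
    fun streamOnce(model: StreamingChatModel, prompt: String) {
        model.chat(prompt, object : StreamingChatResponseHandler {
            // Each incremental chunk of generated text.
            override fun onPartialResponse(partialResponse: String) = print(partialResponse)

            // Full response once generation finishes.
            override fun onCompleteResponse(completeResponse: ChatResponse) = println("\n[done]")

            override fun onError(error: Throwable) = error.printStackTrace()
        })
    }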
@@ -0,0 +1,82 @@
+package com.penumbraos.mabl.plugins.llm
+
+import kotlinx.serialization.SerialName
+import kotlinx.serialization.Serializable
+import kotlinx.serialization.json.JsonElement
+import kotlinx.serialization.json.JsonObject
+import kotlinx.serialization.json.JsonTransformingSerializer
+import kotlinx.serialization.json.buildJsonObject
+import kotlinx.serialization.json.put
+
+private const val DEFAULT_MAX_TOKENS = 1000
+private const val DEFAULT_TEMPERATURE = 0.7
+
+interface LlmConfig {
+    val type: String
+
+    val name: String
+    val apiKey: String
+    val model: String
+    val maxTokens: Int
+    val temperature: Double
+    val systemPrompt: String?
+}
+
+@Serializable
+sealed class LlmConfiguration : LlmConfig {
+
+    @Serializable
+    @SerialName("gemini")
+    data class Gemini(
+        override val type: String = "Gemini",
+
+        override val name: String,
+        override val apiKey: String,
+        override val model: String,
+        override val maxTokens: Int = DEFAULT_MAX_TOKENS,
+        override val temperature: Double = DEFAULT_TEMPERATURE,
+        override val systemPrompt: String? = null
+    ) : LlmConfiguration()
+
+    @Serializable
+    @SerialName("openai")
+    data class OpenAI(
+        override val type: String = "OpenAI",
+
+        override val name: String,
+        override val apiKey: String,
+        override val model: String,
+        val baseUrl: String,
+        override val maxTokens: Int = DEFAULT_MAX_TOKENS,
+        override val temperature: Double = DEFAULT_TEMPERATURE,
+        override val systemPrompt: String? = null
+    ) : LlmConfiguration()
+}
+
+object LlmConfigurationSerializer :
+    JsonTransformingSerializer<LlmConfiguration>(LlmConfiguration.serializer()) {
+    override fun transformDeserialize(element: JsonElement): JsonElement {
+        if (element is JsonObject && "type" !in element) {
+            // If no type field, default to "openai"
+            return buildJsonObject {
+                put("type", "openai")
+                element.forEach { (key, value) ->
+                    put(key, value)
+                }
+            }
+        }
+        return element
+    }
+}
+
+@Serializable
+data class LlmConfigFile(
+    @Serializable(with = LlmConfigurationListSerializer::class)
+    val configs: List<LlmConfiguration>
+)
+
+object LlmConfigurationListSerializer : JsonTransformingSerializer<List<LlmConfiguration>>(
+    kotlinx.serialization.builtins.ListSerializer(LlmConfigurationSerializer)
+) {
+    override fun transformDeserialize(element: JsonElement): JsonElement = element
+}
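The new configuration model is a serializable sealed hierarchy keyed on a "type" discriminator, with LlmConfigurationSerializer defaulting untyped (pre-existing) entries to "openai" so old config files keep parsing. A standalone sketch of that same defaulting rule applied to raw JSON (illustrative values; not part of the commit):

    import kotlinx.serialization.json.*

    // Same rule as transformDeserialize above: an object without "type"
    // is treated as an OpenAI entry by injecting "type": "openai".
    fun defaultToOpenAi(element: JsonElement): JsonElement =
        if (element is JsonObject && "type" !in element) {
            buildJsonObject {
                put("type", "openai")
                element.forEach { (key, value) -> put(key, value) }
            }
        } else element

    fun main() {
        // Legacy entry with no "type" field: gains "type": "openai".
        val legacy = Json.parseToJsonElement(
            """{"name": "default", "apiKey": "sk-...", "model": "gpt-4o-mini", "baseUrl": "https://api.openai.com/v1"}"""
        )
        println(defaultToOpenAi(legacy))

        // Typed Gemini entry passes through unchanged.
        val gemini = Json.parseToJsonElement(
            """{"type": "gemini", "name": "g", "apiKey": "AIza...", "model": "gemini-2.0-flash"}"""
        )
        println(defaultToOpenAi(gemini))
    }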
@@ -2,28 +2,11 @@ package com.penumbraos.mabl.plugins.llm

 import android.annotation.SuppressLint
 import android.util.Log
-import kotlinx.serialization.Serializable
 import kotlinx.serialization.json.Json
 import java.io.File

 private const val TAG = "LlmConfigService"

-@Serializable
-data class LlmConfiguration(
-    val name: String,
-    val apiKey: String,
-    val model: String,
-    val maxTokens: Int = 1000,
-    val temperature: Double = 0.7,
-    val systemPrompt: String? = null,
-    val baseUrl: String
-)
-
-@Serializable
-data class LlmConfigFile(
-    val configs: List<LlmConfiguration>
-)
-
 class LlmConfigManager {

     private var configs: List<LlmConfiguration>? = null
@@ -50,11 +33,16 @@ class LlmConfigManager {
             val jsonString = configFile.readText()
             val configFile = json.decodeFromString<LlmConfigFile>(jsonString)
             val logMap = configFile.configs.map { config ->
+                val baseUrlInfo = if (config is LlmConfiguration.OpenAI) {
+                    "Base URL: ${config.baseUrl}\n "
+                } else {
+                    ""
+                }
                 """
+                    Type: ${config.type}
                     Name: ${config.name}
                     Model: ${config.model}
-                    Base URL: ${config.baseUrl}
-                    Max Tokens: ${config.maxTokens}
+                    ${baseUrlInfo}Max Tokens: ${config.maxTokens}
                     Temperature: ${config.temperature}
                 """.trimIndent()
             }
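Putting the two hunks together: the config file is now a list of mixed typed and untyped entries, and the log summary prints the base URL only for OpenAI entries. An illustrative example (not from the commit; all values are placeholders) of a config file this loader would accept:

    // Sketch only — the file location and field values are project-specific
    // and not shown in this diff.
    val exampleConfigJson = """
    {
      "configs": [
        {
          "name": "hosted-openai",
          "apiKey": "sk-...",
          "model": "gpt-4o-mini",
          "baseUrl": "https://api.openai.com/v1"
        },
        {
          "type": "gemini",
          "name": "gemini-grounded",
          "apiKey": "AIza...",
          "model": "gemini-2.0-flash"
        }
      ]
    }
    """.trimIndent()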