Some checks failed
Backend Tests / Static Checks (push) Has been cancelled
Backend Tests / Tests (other) (push) Has been cancelled
Backend Tests / Tests (plugin) (push) Has been cancelled
Backend Tests / Tests (server) (push) Has been cancelled
Backend Tests / Tests (store) (push) Has been cancelled
Build Canary Image / build-frontend (push) Has been cancelled
Build Canary Image / build-push (linux/amd64) (push) Has been cancelled
Build Canary Image / build-push (linux/arm64) (push) Has been cancelled
Build Canary Image / merge (push) Has been cancelled
Frontend Tests / Lint (push) Has been cancelled
Frontend Tests / Build (push) Has been cancelled
Proto Linter / Lint Protos (push) Has been cancelled
96 lines
1.9 KiB
Protocol Buffer
96 lines
1.9 KiB
Protocol Buffer
syntax = "proto3";
|
|
|
|
package memos.api.v1;
|
|
|
|
import "google/api/annotations.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
option go_package = "gen/api/v1";
|
|
|
|
// AIService provides AI-related functionality: managing provider
// settings, testing provider connectivity, and generating text
// completions.
service AIService {
  // Get AI provider settings.
  // Returns the current AISettings for all providers.
  rpc GetAISettings(GetAISettingsRequest) returns (AISettings) {
    option (google.api.http) = {
      get: "/api/v1/ai/settings"
    };
  }

  // Update AI provider settings.
  // The PATCH body carries the `settings` field of the request;
  // returns the settings as stored after the update.
  rpc UpdateAISettings(UpdateAISettingsRequest) returns (AISettings) {
    option (google.api.http) = {
      patch: "/api/v1/ai/settings"
      body: "settings"
    };
  }

  // Test AI provider connection.
  // Checks whether the selected provider is reachable/usable; the
  // outcome is reported in TestAIProviderResponse rather than as an
  // RPC error (success flag plus human-readable message).
  rpc TestAIProvider(TestAIProviderRequest) returns (TestAIProviderResponse) {
    option (google.api.http) = {
      post: "/api/v1/ai/test"
      body: "*"
    };
  }

  // Generate text completion.
  // Sends the prompt to the selected provider and returns the
  // generated text along with token-usage accounting.
  rpc GenerateCompletion(GenerateCompletionRequest) returns (GenerateCompletionResponse) {
    option (google.api.http) = {
      post: "/api/v1/ai/completion"
      body: "*"
    };
  }
}
|
|
|
|
// AISettings aggregates the per-provider configuration. Each field
// corresponds to one ProviderType value.
message AISettings {
  // Configuration for the Groq provider (PROVIDER_TYPE_GROQ).
  GroqSettings groq = 1;

  // Configuration for the Ollama provider (PROVIDER_TYPE_OLLAMA).
  OllamaSettings ollama = 2;
}
|
|
|
|
// GroqSettings configures the Groq provider.
message GroqSettings {
  // API key used to authenticate with Groq. Treat as a secret:
  // avoid logging it or echoing it back to clients.
  string api_key = 1;

  // Default model identifier; presumably used when a request does not
  // name a model explicitly — confirm against the server implementation.
  string default_model = 2;

  // Whether this provider is enabled.
  bool enabled = 3;
}
|
|
|
|
// OllamaSettings configures a self-hosted Ollama provider.
message OllamaSettings {
  // Address of the Ollama server. NOTE(review): exact expected format
  // (scheme/host/port) is not visible here — confirm in server code.
  string host = 1;

  // Default model identifier; presumably used when a request does not
  // name a model explicitly — confirm against the server implementation.
  string default_model = 2;

  // Whether this provider is enabled.
  bool enabled = 3;
}
|
|
|
|
// GetAISettingsRequest is the (currently empty) request for
// GetAISettings. Kept as a dedicated message so fields can be added
// later without changing the RPC signature.
message GetAISettingsRequest {}
|
|
|
|
// UpdateAISettingsRequest carries the new provider settings for
// UpdateAISettings.
message UpdateAISettingsRequest {
  // The settings to apply. Mapped to the HTTP PATCH body by the
  // `body: "settings"` annotation on the RPC.
  AISettings settings = 1;

  // The list of fields in `settings` to update (standard partial-update
  // mask; paths are snake_case field names, e.g. "groq.enabled").
  // When unset, servers conventionally replace all fields. New
  // backward-compatible field; over HTTP it arrives as the
  // `updateMask` query parameter since the body is bound to `settings`.
  google.protobuf.FieldMask update_mask = 2;
}
|
|
|
|
// TestAIProviderRequest selects which provider to connection-test.
message TestAIProviderRequest {
  // The provider to test. PROVIDER_TYPE_UNSPECIFIED is the default if
  // omitted; how the server treats it is not visible here — confirm.
  ProviderType provider = 1;
}
|
|
|
|
// TestAIProviderResponse reports the outcome of a provider
// connectivity test.
message TestAIProviderResponse {
  // True if the provider connection test passed.
  bool success = 1;

  // Human-readable detail about the result (e.g. an error description
  // when success is false).
  string message = 2;
}
|
|
|
|
// GenerateCompletionRequest asks a provider to generate a text
// completion for a prompt.
message GenerateCompletionRequest {
  // The provider that should handle this request.
  ProviderType provider = 1;

  // The prompt text sent to the model.
  string prompt = 2;

  // Model identifier. Presumably falls back to the provider's
  // default_model when empty — confirm in server code.
  string model = 3;

  // Sampling temperature. `optional` gives explicit presence so an
  // intentional 0.0 (greedy decoding) is distinguishable from "unset,
  // use the provider default". Wire-compatible change: same field
  // number and encoding as the previous implicit-presence field.
  optional double temperature = 4;

  // Upper bound on tokens to generate. `optional` distinguishes an
  // explicit 0 from "no limit specified". Wire-compatible change.
  optional int32 max_tokens = 5;
}
|
|
|
|
// GenerateCompletionResponse carries the generated text and
// token-usage accounting for a completion request.
message GenerateCompletionResponse {
  // The generated completion text.
  string text = 1;

  // The model that actually served the request (useful when the
  // request left `model` empty and a default was applied).
  string model_used = 2;

  // Tokens consumed by the prompt.
  int32 prompt_tokens = 3;

  // Tokens produced in the completion.
  int32 completion_tokens = 4;

  // Total tokens billed/used; presumably prompt_tokens +
  // completion_tokens, but provider-reported — confirm.
  int32 total_tokens = 5;
}
|
|
|
|
// ProviderType identifies which AI backend a request targets.
enum ProviderType {
  // Default value; no provider selected. Never a valid real choice.
  PROVIDER_TYPE_UNSPECIFIED = 0;

  // Groq provider, configured via GroqSettings.
  PROVIDER_TYPE_GROQ = 1;

  // Ollama provider, configured via OllamaSettings.
  PROVIDER_TYPE_OLLAMA = 2;
}