first commit
Some checks failed
Backend Tests / Static Checks (push) Has been cancelled
Backend Tests / Tests (other) (push) Has been cancelled
Backend Tests / Tests (plugin) (push) Has been cancelled
Backend Tests / Tests (server) (push) Has been cancelled
Backend Tests / Tests (store) (push) Has been cancelled
Build Canary Image / build-frontend (push) Has been cancelled
Build Canary Image / build-push (linux/amd64) (push) Has been cancelled
Build Canary Image / build-push (linux/arm64) (push) Has been cancelled
Build Canary Image / merge (push) Has been cancelled
Frontend Tests / Lint (push) Has been cancelled
Frontend Tests / Build (push) Has been cancelled
Proto Linter / Lint Protos (push) Has been cancelled
Some checks failed
Backend Tests / Static Checks (push) Has been cancelled
Backend Tests / Tests (other) (push) Has been cancelled
Backend Tests / Tests (plugin) (push) Has been cancelled
Backend Tests / Tests (server) (push) Has been cancelled
Backend Tests / Tests (store) (push) Has been cancelled
Build Canary Image / build-frontend (push) Has been cancelled
Build Canary Image / build-push (linux/amd64) (push) Has been cancelled
Build Canary Image / build-push (linux/arm64) (push) Has been cancelled
Build Canary Image / merge (push) Has been cancelled
Frontend Tests / Lint (push) Has been cancelled
Frontend Tests / Build (push) Has been cancelled
Proto Linter / Lint Protos (push) Has been cancelled
This commit is contained in:
88
server/runner/memopayload/runner.go
Normal file
88
server/runner/memopayload/runner.go
Normal file
@@ -0,0 +1,88 @@
|
||||
package memopayload
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/usememos/memos/plugin/markdown"
|
||||
storepb "github.com/usememos/memos/proto/gen/store"
|
||||
"github.com/usememos/memos/store"
|
||||
)
|
||||
|
||||
// Runner rebuilds the payload of memos held in Store, using
// MarkdownService to parse memo content into tags and properties.
type Runner struct {
	Store           *store.Store
	MarkdownService markdown.Service
}
|
||||
|
||||
func NewRunner(store *store.Store, markdownService markdown.Service) *Runner {
|
||||
return &Runner{
|
||||
Store: store,
|
||||
MarkdownService: markdownService,
|
||||
}
|
||||
}
|
||||
|
||||
// RunOnce rebuilds the payload of all memos.
|
||||
func (r *Runner) RunOnce(ctx context.Context) {
|
||||
// Process memos in batches to avoid loading all memos into memory at once
|
||||
const batchSize = 100
|
||||
offset := 0
|
||||
processed := 0
|
||||
|
||||
for {
|
||||
limit := batchSize
|
||||
memos, err := r.Store.ListMemos(ctx, &store.FindMemo{
|
||||
Limit: &limit,
|
||||
Offset: &offset,
|
||||
})
|
||||
if err != nil {
|
||||
slog.Error("failed to list memos", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Break if no more memos
|
||||
if len(memos) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Process batch
|
||||
batchSuccessCount := 0
|
||||
for _, memo := range memos {
|
||||
if err := RebuildMemoPayload(memo, r.MarkdownService); err != nil {
|
||||
slog.Error("failed to rebuild memo payload", "err", err, "memoID", memo.ID)
|
||||
continue
|
||||
}
|
||||
if err := r.Store.UpdateMemo(ctx, &store.UpdateMemo{
|
||||
ID: memo.ID,
|
||||
Payload: memo.Payload,
|
||||
}); err != nil {
|
||||
slog.Error("failed to update memo", "err", err, "memoID", memo.ID)
|
||||
continue
|
||||
}
|
||||
batchSuccessCount++
|
||||
}
|
||||
|
||||
processed += len(memos)
|
||||
slog.Info("Processed memo batch", "batchSize", len(memos), "successCount", batchSuccessCount, "totalProcessed", processed)
|
||||
|
||||
// Move to next batch
|
||||
offset += len(memos)
|
||||
}
|
||||
}
|
||||
|
||||
func RebuildMemoPayload(memo *store.Memo, markdownService markdown.Service) error {
|
||||
if memo.Payload == nil {
|
||||
memo.Payload = &storepb.MemoPayload{}
|
||||
}
|
||||
|
||||
// Use goldmark service to extract all metadata in a single pass (more efficient)
|
||||
data, err := markdownService.ExtractAll([]byte(memo.Content))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to extract markdown metadata")
|
||||
}
|
||||
|
||||
memo.Payload.Tags = data.Tags
|
||||
memo.Payload.Property = data.Property
|
||||
return nil
|
||||
}
|
||||
134
server/runner/s3presign/runner.go
Normal file
134
server/runner/s3presign/runner.go
Normal file
@@ -0,0 +1,134 @@
|
||||
package s3presign
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
||||
"github.com/usememos/memos/plugin/storage/s3"
|
||||
storepb "github.com/usememos/memos/proto/gen/store"
|
||||
"github.com/usememos/memos/store"
|
||||
)
|
||||
|
||||
// Runner periodically refreshes presigned URLs for attachments stored in S3,
// reading and updating them through Store.
type Runner struct {
	Store *store.Store
}
|
||||
|
||||
func NewRunner(store *store.Store) *Runner {
|
||||
return &Runner{
|
||||
Store: store,
|
||||
}
|
||||
}
|
||||
|
||||
// runnerInterval is how often the presign pass runs. Presigned URLs are
// issued with a 5-day expiration (see CheckAndPresign), so a 24-hour cadence
// refreshes them well before they expire.
const runnerInterval = time.Hour * 24
|
||||
|
||||
func (r *Runner) Run(ctx context.Context) {
|
||||
ticker := time.NewTicker(runnerInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
r.RunOnce(ctx)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RunOnce performs a single presign pass over all S3-backed attachments.
func (r *Runner) RunOnce(ctx context.Context) {
	r.CheckAndPresign(ctx)
}
|
||||
|
||||
func (r *Runner) CheckAndPresign(ctx context.Context) {
|
||||
instanceStorageSetting, err := r.Store.GetInstanceStorageSetting(ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
s3StorageType := storepb.AttachmentStorageType_S3
|
||||
// Limit attachments to a reasonable batch size
|
||||
const batchSize = 100
|
||||
offset := 0
|
||||
|
||||
for {
|
||||
limit := batchSize
|
||||
attachments, err := r.Store.ListAttachments(ctx, &store.FindAttachment{
|
||||
GetBlob: false,
|
||||
StorageType: &s3StorageType,
|
||||
Limit: &limit,
|
||||
Offset: &offset,
|
||||
})
|
||||
if err != nil {
|
||||
slog.Error("Failed to list attachments for presigning", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Break if no more attachments
|
||||
if len(attachments) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Process batch of attachments
|
||||
presignCount := 0
|
||||
for _, attachment := range attachments {
|
||||
s3ObjectPayload := attachment.Payload.GetS3Object()
|
||||
if s3ObjectPayload == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if s3ObjectPayload.LastPresignedTime != nil {
|
||||
// Skip if the presigned URL is still valid for the next 4 days.
|
||||
// The expiration time is set to 5 days.
|
||||
if time.Now().Before(s3ObjectPayload.LastPresignedTime.AsTime().Add(4 * 24 * time.Hour)) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
s3Config := instanceStorageSetting.GetS3Config()
|
||||
if s3ObjectPayload.S3Config != nil {
|
||||
s3Config = s3ObjectPayload.S3Config
|
||||
}
|
||||
if s3Config == nil {
|
||||
slog.Error("S3 config is not found")
|
||||
continue
|
||||
}
|
||||
|
||||
s3Client, err := s3.NewClient(ctx, s3Config)
|
||||
if err != nil {
|
||||
slog.Error("Failed to create S3 client", "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
presignURL, err := s3Client.PresignGetObject(ctx, s3ObjectPayload.Key)
|
||||
if err != nil {
|
||||
slog.Error("Failed to presign URL", "error", err, "attachmentID", attachment.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
s3ObjectPayload.S3Config = s3Config
|
||||
s3ObjectPayload.LastPresignedTime = timestamppb.New(time.Now())
|
||||
if err := r.Store.UpdateAttachment(ctx, &store.UpdateAttachment{
|
||||
ID: attachment.ID,
|
||||
Reference: &presignURL,
|
||||
Payload: &storepb.AttachmentPayload{
|
||||
Payload: &storepb.AttachmentPayload_S3Object_{
|
||||
S3Object: s3ObjectPayload,
|
||||
},
|
||||
},
|
||||
}); err != nil {
|
||||
slog.Error("Failed to update attachment", "error", err, "attachmentID", attachment.ID)
|
||||
continue
|
||||
}
|
||||
presignCount++
|
||||
}
|
||||
|
||||
slog.Info("Presigned batch of S3 attachments", "batchSize", len(attachments), "presigned", presignCount)
|
||||
|
||||
// Move to next batch
|
||||
offset += len(attachments)
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user