diff --git a/src/cmd/cli/command/compose.go b/src/cmd/cli/command/compose.go index a7f55b7e6..1da75206b 100644 --- a/src/cmd/cli/command/compose.go +++ b/src/cmd/cli/command/compose.go @@ -153,15 +153,17 @@ func makeComposeUpCmd() *cobra.Command { term.Info("Tailing logs for", tailSource, "; press Ctrl+C to detach:") tailOptions := newTailOptionsForDeploy(deploy.Etag, since, verbose) - serviceStates, err := cli.TailAndMonitor(ctx, project, provider, time.Duration(waitTimeout)*time.Second, tailOptions) + serviceStates, logCache, err := cli.TailAndMonitor(ctx, project, provider, time.Duration(waitTimeout)*time.Second, tailOptions) if err != nil { - handleTailAndMonitorErr(ctx, err, client, cli.DebugConfig{ + logs := logCache.Get() + handleTailAndMonitorErr(ctx, err, logs, client, cli.DebugConfig{ Deployment: deploy.Etag, ModelId: modelId, Project: project, Provider: provider, Since: since, }) + return err } @@ -224,7 +226,7 @@ func handleComposeUpErr(ctx context.Context, err error, project *compose.Project return cli.InteractiveDebugForClientError(ctx, client, project, err) } -func handleTailAndMonitorErr(ctx context.Context, err error, client *cliClient.GrpcClient, debugConfig cli.DebugConfig) { +func handleTailAndMonitorErr(ctx context.Context, err error, logs []string, client *cliClient.GrpcClient, debugConfig cli.DebugConfig) { var errDeploymentFailed cliClient.ErrDeploymentFailed if errors.As(err, &errDeploymentFailed) { // Tail got canceled because of deployment failure: prompt to show the debugger @@ -235,7 +237,13 @@ func handleTailAndMonitorErr(ctx context.Context, err error, client *cliClient.G if nonInteractive { printDefangHint("To debug the deployment, do:", debugConfig.String()) } else { - track.Evt("Debug Prompted", P("failedServices", debugConfig.FailedServices), P("etag", debugConfig.Deployment), P("reason", errDeploymentFailed)) + props := track.MakeEventLogProperties("logs", logs) + props = append(props, + P("failedServices", 
debugConfig.FailedServices), + P("etag", debugConfig.Deployment), + P("reason", errDeploymentFailed), + ) + track.Evt("Debug Prompted", props...) // Call the AI debug endpoint using the original command context (not the tail ctx which is canceled) if nil != cli.InteractiveDebugDeployment(ctx, client, debugConfig) { @@ -248,7 +256,7 @@ func handleTailAndMonitorErr(ctx context.Context, err error, client *cliClient.G } func newTailOptionsForDeploy(deployment string, since time.Time, verbose bool) cli.TailOptions { - return cli.TailOptions{ + tailOpt := cli.TailOptions{ Deployment: deployment, LogType: logs.LogTypeAll, // TODO: Move this to playground provider GetDeploymentStatus @@ -266,6 +274,8 @@ func newTailOptionsForDeploy(deployment string, since time.Time, verbose bool) c Since: since, Verbose: verbose, } + + return tailOpt } func flushWarnings() { @@ -616,7 +626,9 @@ func handleLogsCmd(cmd *cobra.Command, args []string) error { Verbose: verbose, Follow: follow, } - return cli.Tail(cmd.Context(), provider, projectName, tailOptions) + + _, err = cli.Tail(cmd.Context(), provider, projectName, tailOptions) + return err } func setupComposeCommand() *cobra.Command { diff --git a/src/pkg/circularbuffer/circularbuffer.go b/src/pkg/circularbuffer/circularbuffer.go new file mode 100644 index 000000000..4ec990d5e --- /dev/null +++ b/src/pkg/circularbuffer/circularbuffer.go @@ -0,0 +1,52 @@ +package circularbuffer + +// BufferInterface abstracts the buffer operations +type BufferInterface[T any] interface { + Add(item T) + Get() []T +} + +type CircularBuffer[T any] struct { + size int + entries int + index int + data []T +} + +func (c *CircularBuffer[T]) Add(item T) { + c.entries++ + c.data[c.index] = item + c.index = (c.index + 1) % c.size +} + +func (c *CircularBuffer[T]) Get() []T { + maxItems := min(c.entries, c.size) + items := make([]T, maxItems) + startIdx := c.index + + // the c.index points to the next write position (ie. 
oldest entry) if the buffer is full, + // otherwise if the buffer is not full then c.index does not point to the oldest entry so we + // need to start from index 0 + if c.entries < c.size { + startIdx = 0 + } + + // Collect items in chronological order + for i := range maxItems { + idx := (startIdx + i) % c.size + items[i] = c.data[idx] + } + return items +} + +func NewCircularBuffer[T any](bufferSize int) *CircularBuffer[T] { + if bufferSize <= 0 { + panic("failed to create a circular buffer: cannot have zero elements") + } + return &CircularBuffer[T]{ + size: bufferSize, + entries: 0, + index: 0, + data: make([]T, bufferSize), + } +} diff --git a/src/pkg/circularbuffer/circularbuffer_test.go b/src/pkg/circularbuffer/circularbuffer_test.go new file mode 100644 index 000000000..166a0775b --- /dev/null +++ b/src/pkg/circularbuffer/circularbuffer_test.go @@ -0,0 +1,23 @@ +package circularbuffer + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCircularBuffer(t *testing.T) { + buffer := NewCircularBuffer[int](3) + + assert.Equal(t, []int{}, buffer.Get()) + buffer.Add(1) + assert.Equal(t, []int{1}, buffer.Get()) + buffer.Add(2) + assert.Equal(t, []int{1, 2}, buffer.Get()) + buffer.Add(3) + assert.Equal(t, []int{1, 2, 3}, buffer.Get()) + buffer.Add(4) + assert.Equal(t, []int{2, 3, 4}, buffer.Get()) + buffer.Add(5) + assert.Equal(t, []int{3, 4, 5}, buffer.Get()) +} diff --git a/src/pkg/cli/bootstrap.go b/src/pkg/cli/bootstrap.go index 79eb01893..89ab6aa9c 100644 --- a/src/pkg/cli/bootstrap.go +++ b/src/pkg/cli/bootstrap.go @@ -53,7 +53,7 @@ func TailAndWaitForCD(ctx context.Context, provider client.Provider, projectName // blocking call to tail var tailErr error - if err := streamLogs(ctx, provider, projectName, tailOptions, logEntryPrintHandler); err != nil { + if _, err := streamLogs(ctx, provider, projectName, tailOptions, logEntryPrintHandler); err != nil { term.Debug("Tail stopped with", err, errors.Unwrap(err)) if !errors.Is(err, 
context.Canceled) { tailErr = err diff --git a/src/pkg/cli/composeUp_test.go b/src/pkg/cli/composeUp_test.go index fb873faac..6e44ed4d4 100644 --- a/src/pkg/cli/composeUp_test.go +++ b/src/pkg/cli/composeUp_test.go @@ -291,7 +291,7 @@ func TestComposeUpStops(t *testing.T) { timer := time.AfterFunc(time.Second, func() { provider.subscribeStream.Send(tt.svcFailed, tt.subscribeErr) }) t.Cleanup(func() { timer.Stop() }) } - _, err = TailAndMonitor(ctx, project, provider, -1, TailOptions{Deployment: resp.Etag}) + _, _, err = TailAndMonitor(ctx, project, provider, -1, TailOptions{Deployment: resp.Etag}) if err != nil { if err.Error() != tt.wantError { t.Errorf("expected error: %v, got: %v", tt.wantError, err) diff --git a/src/pkg/cli/estimate.go b/src/pkg/cli/estimate.go index 0e77afd5d..1d44008f3 100644 --- a/src/pkg/cli/estimate.go +++ b/src/pkg/cli/estimate.go @@ -76,7 +76,7 @@ func GeneratePreview(ctx context.Context, project *compose.Project, client clien Verbose: true, } - err = streamLogs(ctx, previewProvider, project.Name, tailOptions, func(entry *defangv1.LogEntry, options *TailOptions, t *term.Term) error { + _, err = streamLogs(ctx, previewProvider, project.Name, tailOptions, func(entry *defangv1.LogEntry, options *TailOptions, t *term.Term) error { if strings.HasPrefix(entry.Message, "Preview succeeded") { return io.EOF } else if strings.HasPrefix(entry.Message, "Preview failed") { diff --git a/src/pkg/cli/tail.go b/src/pkg/cli/tail.go index 6e24e2cb8..348563730 100644 --- a/src/pkg/cli/tail.go +++ b/src/pkg/cli/tail.go @@ -14,6 +14,7 @@ import ( "time" "github.com/DefangLabs/defang/src/pkg" + "github.com/DefangLabs/defang/src/pkg/circularbuffer" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/dryrun" "github.com/DefangLabs/defang/src/pkg/logs" @@ -143,7 +144,7 @@ func (cerr CancelError) Unwrap() error { return cerr.error } -func Tail(ctx context.Context, provider client.Provider, projectName string, options 
TailOptions) error { +func Tail(ctx context.Context, provider client.Provider, projectName string, options TailOptions) (circularbuffer.BufferInterface[string], error) { if options.LogType == logs.LogTypeUnspecified { options.LogType = logs.LogTypeAll } @@ -167,7 +168,7 @@ func Tail(ctx context.Context, provider client.Provider, projectName string, opt } if dryrun.DoDryRun { - return dryrun.ErrDryRun + return nil, dryrun.ErrDryRun } return streamLogs(ctx, provider, projectName, options, logEntryPrintHandler) @@ -208,7 +209,7 @@ type LogEntryHandler func(*defangv1.LogEntry, *TailOptions, *term.Term) error const DefaultTailLimit = 100 -func streamLogs(ctx context.Context, provider client.Provider, projectName string, options TailOptions, handler LogEntryHandler) error { +func streamLogs(ctx context.Context, provider client.Provider, projectName string, options TailOptions, handler LogEntryHandler) (circularbuffer.BufferInterface[string], error) { var sinceTs, untilTs *timestamppb.Timestamp if pkg.IsValidTime(options.Since) { sinceTs = timestamppb.New(options.Since) @@ -247,7 +248,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin serverStream, err := provider.QueryLogs(ctx, tailRequest) if err != nil { - return err + return nil, err } ctx, cancel := context.WithCancel(ctx) @@ -313,16 +314,17 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin return receiveLogs(ctx, provider, projectName, tailRequest, serverStream, &options, doSpinner, handler) } -func receiveLogs(ctx context.Context, provider client.Provider, projectName string, tailRequest *defangv1.TailRequest, serverStream client.ServerStream[defangv1.TailResponse], options *TailOptions, doSpinner bool, handler LogEntryHandler) error { +func receiveLogs(ctx context.Context, provider client.Provider, projectName string, tailRequest *defangv1.TailRequest, serverStream client.ServerStream[defangv1.TailResponse], options *TailOptions, doSpinner bool, 
handler LogEntryHandler) (circularbuffer.BufferInterface[string], error) { + logCache := circularbuffer.NewCircularBuffer[string](30) skipDuplicate := false var err error for { if !serverStream.Receive() { if errors.Is(serverStream.Err(), context.Canceled) || errors.Is(serverStream.Err(), context.DeadlineExceeded) { - return &CancelError{TailOptions: *options, error: serverStream.Err(), ProjectName: projectName} + return logCache, &CancelError{TailOptions: *options, error: serverStream.Err(), ProjectName: projectName} } if errors.Is(serverStream.Err(), io.EOF) { - return serverStream.Err() + return logCache, serverStream.Err() } // Reconnect on Error: internal: stream error: stream ID 5; INTERNAL_ERROR; received from peer @@ -333,13 +335,13 @@ func receiveLogs(ctx context.Context, provider client.Provider, projectName stri spaces, _ = term.Warnf("Reconnecting...\r") // overwritten below } if err := provider.DelayBeforeRetry(ctx); err != nil { - return err + return logCache, err } tailRequest.Since = timestamppb.New(options.Since) serverStream, err = provider.QueryLogs(ctx, tailRequest) if err != nil { term.Debug("Reconnect failed:", err) - return err + return logCache, err } if !options.Raw { term.Printf("%*s", spaces, "\r") // clear the "reconnecting" message @@ -348,7 +350,7 @@ func receiveLogs(ctx context.Context, provider client.Provider, projectName stri continue } - return serverStream.Err() // returns nil on EOF + return logCache, serverStream.Err() // returns nil on EOF } msg := serverStream.Msg() @@ -356,13 +358,13 @@ func receiveLogs(ctx context.Context, provider client.Provider, projectName stri continue } - if err = handleLogEntryMsgs(msg, doSpinner, skipDuplicate, options, handler); err != nil { - return err + if err = handleLogEntryMsgs(msg, logCache, doSpinner, skipDuplicate, options, handler); err != nil { + return logCache, err } } } -func handleLogEntryMsgs(msg *defangv1.TailResponse, doSpinner bool, skipDuplicate bool, options *TailOptions, handler 
LogEntryHandler) error { +func handleLogEntryMsgs(msg *defangv1.TailResponse, logCache circularbuffer.BufferInterface[string], doSpinner bool, skipDuplicate bool, options *TailOptions, handler LogEntryHandler) error { for _, e := range msg.Entries { // Replace service progress messages with our own spinner if doSpinner && isProgressDot(e.Message) { @@ -382,6 +384,8 @@ func handleLogEntryMsgs(msg *defangv1.TailResponse, doSpinner bool, skipDuplicat options.Since = ts } + logCache.Add(e.Message) + err := handler(e, options, term.DefaultTerm) if err != nil { term.Debug("Ending tail loop", err) diff --git a/src/pkg/cli/tailAndMonitor.go b/src/pkg/cli/tailAndMonitor.go index df27549a4..e9698091f 100644 --- a/src/pkg/cli/tailAndMonitor.go +++ b/src/pkg/cli/tailAndMonitor.go @@ -8,6 +8,7 @@ import ( "time" "github.com/DefangLabs/defang/src/pkg" + "github.com/DefangLabs/defang/src/pkg/circularbuffer" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/compose" "github.com/DefangLabs/defang/src/pkg/term" @@ -17,7 +18,7 @@ import ( const targetServiceState = defangv1.ServiceState_DEPLOYMENT_COMPLETED -func TailAndMonitor(ctx context.Context, project *compose.Project, provider client.Provider, waitTimeout time.Duration, tailOptions TailOptions) (ServiceStates, error) { +func TailAndMonitor(ctx context.Context, project *compose.Project, provider client.Provider, waitTimeout time.Duration, tailOptions TailOptions) (ServiceStates, circularbuffer.BufferInterface[string], error) { tailOptions.Follow = true if tailOptions.Deployment == "" { panic("tailOptions.Deployment must be a valid deployment ID") @@ -67,8 +68,9 @@ func TailAndMonitor(ctx context.Context, project *compose.Project, provider clie }() // blocking call to tail - var tailErr error - if err := Tail(tailCtx, provider, project.Name, tailOptions); err != nil { + var err, tailErr error + var logCache circularbuffer.BufferInterface[string] + if logCache, err = Tail(tailCtx, 
provider, project.Name, tailOptions); err != nil { term.Debug("Tail stopped with", err, errors.Unwrap(err)) if connect.CodeOf(err) == connect.CodePermissionDenied { @@ -99,7 +101,7 @@ func TailAndMonitor(ctx context.Context, project *compose.Project, provider clie } } - return serviceStates, errors.Join(cdErr, svcErr, tailErr) + return serviceStates, logCache, errors.Join(cdErr, svcErr, tailErr) } func CanMonitorService(service compose.ServiceConfig) bool { diff --git a/src/pkg/cli/tail_test.go b/src/pkg/cli/tail_test.go index 420f902ae..a38e424b5 100644 --- a/src/pkg/cli/tail_test.go +++ b/src/pkg/cli/tail_test.go @@ -152,7 +152,7 @@ func TestTail(t *testing.T) { }, } - err := Tail(t.Context(), p, projectName, TailOptions{Verbose: true}) // Output host + _, err := Tail(t.Context(), p, projectName, TailOptions{Verbose: true}) // Output host if err != io.EOF { t.Errorf("Tail() error = %v, want io.EOF", err) } @@ -251,7 +251,7 @@ func TestUTC(t *testing.T) { localMock = localMock.MockTimestamp(localTime) // Start the terminal for local time test - err := Tail(t.Context(), localMock, projectName, TailOptions{Verbose: true}) // Output host + _, err := Tail(t.Context(), localMock, projectName, TailOptions{Verbose: true}) // Output host if err != nil { t.Errorf("Tail() error = %v, want io.EOF", err) } @@ -283,7 +283,7 @@ func TestUTC(t *testing.T) { utcMock := &mockTailProvider{} utcMock = utcMock.MockTimestamp(utcTime) - err = Tail(t.Context(), utcMock, projectName, TailOptions{Verbose: true}) + _, err = Tail(t.Context(), utcMock, projectName, TailOptions{Verbose: true}) if err != nil { t.Errorf("Tail() error = %v, want io.EOF", err) } @@ -332,7 +332,7 @@ func TestTailError(t *testing.T) { mock := &mockQueryErrorProvider{ TailStreamError: tt.err, } - err := Tail(t.Context(), mock, "project", tailOptions) + _, err := Tail(t.Context(), mock, "project", tailOptions) if err != nil { if err.Error() != tt.wantError { t.Errorf("Tail() error = %q, want: %q", err.Error(), 
tt.wantError) @@ -368,7 +368,7 @@ func TestTailContext(t *testing.T) { time.AfterFunc(10*time.Millisecond, func() { mock.tailStream.Send(nil, tt.cause) }) - err := Tail(ctx, mock, "project", tailOptions) + _, err := Tail(ctx, mock, "project", tailOptions) if err.Error() != tt.wantError { t.Errorf("Tail() error = %q, want: %q", err.Error(), tt.wantError) } diff --git a/src/pkg/mcp/tools/default_tool_cli.go b/src/pkg/mcp/tools/default_tool_cli.go index 4f2f600e0..ef2096072 100644 --- a/src/pkg/mcp/tools/default_tool_cli.go +++ b/src/pkg/mcp/tools/default_tool_cli.go @@ -6,6 +6,7 @@ import ( "os" "strconv" + "github.com/DefangLabs/defang/src/pkg/circularbuffer" "github.com/DefangLabs/defang/src/pkg/cli" cliClient "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/compose" @@ -49,7 +50,7 @@ func (DefaultToolCLI) ComposeUp(ctx context.Context, project *compose.Project, c return cli.ComposeUp(ctx, project, client, provider, uploadMode, mode) } -func (DefaultToolCLI) Tail(ctx context.Context, provider cliClient.Provider, project *compose.Project, options cli.TailOptions) error { +func (DefaultToolCLI) Tail(ctx context.Context, provider cliClient.Provider, project *compose.Project, options cli.TailOptions) (circularbuffer.BufferInterface[string], error) { return cli.Tail(ctx, provider, project.Name, options) } diff --git a/src/pkg/mcp/tools/interfaces.go b/src/pkg/mcp/tools/interfaces.go index 670be7bd6..e23c7fe2e 100644 --- a/src/pkg/mcp/tools/interfaces.go +++ b/src/pkg/mcp/tools/interfaces.go @@ -4,6 +4,7 @@ package tools import ( "context" + "github.com/DefangLabs/defang/src/pkg/circularbuffer" cliTypes "github.com/DefangLabs/defang/src/pkg/cli" cliClient "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/compose" @@ -31,5 +32,5 @@ type CLIInterface interface { OpenBrowser(url string) error PrintEstimate(mode modes.Mode, estimate *defangv1.EstimateResponse) string RunEstimate(ctx 
context.Context, project *compose.Project, client *cliClient.GrpcClient, provider cliClient.Provider, providerId cliClient.ProviderID, region string, mode modes.Mode) (*defangv1.EstimateResponse, error) - Tail(ctx context.Context, provider cliClient.Provider, project *compose.Project, options cliTypes.TailOptions) error + Tail(ctx context.Context, provider cliClient.Provider, project *compose.Project, options cliTypes.TailOptions) (circularbuffer.BufferInterface[string], error) } diff --git a/src/pkg/mcp/tools/logs.go b/src/pkg/mcp/tools/logs.go index ef24d2e78..49bbacba2 100644 --- a/src/pkg/mcp/tools/logs.go +++ b/src/pkg/mcp/tools/logs.go @@ -70,7 +70,7 @@ func handleLogsTool(ctx context.Context, loader cliClient.ProjectLoader, params return "", fmt.Errorf("provider not configured correctly: %w", err) } - err = cli.Tail(ctx, provider, project, cliTypes.TailOptions{ + _, err = cli.Tail(ctx, provider, project, cliTypes.TailOptions{ Deployment: params.DeploymentID, Since: params.Since, Until: params.Until, diff --git a/src/pkg/track/track.go b/src/pkg/track/track.go index ea60eac79..b4a3ea4a8 100644 --- a/src/pkg/track/track.go +++ b/src/pkg/track/track.go @@ -1,6 +1,7 @@ package track import ( + "fmt" "strings" "sync" @@ -11,8 +12,12 @@ import ( "github.com/spf13/pflag" ) +const maxPropertyCharacterLength = 255 // chars per property in tracking event + var disableAnalytics = pkg.GetenvBool("DEFANG_DISABLE_ANALYTICS") +const logPropertyNamePrefix = "logs" + type Property = cliClient.Property // P creates a Property with the given name and value. 
@@ -43,7 +48,17 @@ func Evt(name string, props ...Property) { term.Debugf("untracked event %q: %v", name, props) return } - term.Debugf("tracking event %q: %v", name, props) + + // compose logs may be in the tracking, they can be large so filter them out from debug output + var filteredProps []Property + for _, p := range props { + if strings.HasPrefix(p.Name, logPropertyNamePrefix) { + continue + } + filteredProps = append(filteredProps, p) + } + + term.Debugf("tracking event %q: %v", name, filteredProps) trackWG.Add(1) go func() { defer trackWG.Done() @@ -56,6 +71,21 @@ func FlushAllTracking() { trackWG.Wait() } +// MakeEventLogProperties breaks a set of log messages into per-message tracking properties. +// Each property value is truncated to the set size limit per property for tracking events. +func MakeEventLogProperties(name string, message []string) []Property { + var trackMsg []Property + + for i, msg := range message { + if len(msg) > maxPropertyCharacterLength { + msg = msg[:maxPropertyCharacterLength] + } + propName := fmt.Sprintf("%s-%d", name, i+1) + trackMsg = append(trackMsg, P(propName, msg)) + } + return trackMsg +} + func isCompletionCommand(cmd *cobra.Command) bool { return cmd.Name() == cobra.ShellCompRequestCmd || (cmd.Parent() != nil && cmd.Parent().Name() == "completion") } @@ -76,10 +106,12 @@ func Cmd(cmd *cobra.Command, verb string, props ...Property) { command = c.Name() + "-" + command } }) + props = append(props, P("CalledAs", calledAs), P("version", cmd.Root().Version), ) + cmd.Flags().Visit(func(f *pflag.Flag) { props = append(props, P(f.Name, f.Value)) }) diff --git a/src/pkg/track/track_test.go b/src/pkg/track/track_test.go new file mode 100644 index 000000000..808143c80 --- /dev/null +++ b/src/pkg/track/track_test.go @@ -0,0 +1,71 @@ +package track + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEventMessages(t *testing.T) { + tests := []struct { + name string + prefix string + messages []string + expectedEventContents []string + }{ + { + name: 
"empty messages", + prefix: "logs", + messages: []string{}, + expectedEventContents: []string{}, + }, + { + name: "single message", + prefix: "logs", + messages: []string{"msg"}, + expectedEventContents: []string{"msg"}, + }, + { + name: "three messages - three events", + prefix: "logs", + messages: []string{"1", "2", "3"}, + expectedEventContents: []string{"1", "2", "3"}, + }, + { + name: "long message- truncated", + prefix: "logs", + messages: []string{"012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"}, + expectedEventContents: []string{"012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := MakeEventLogProperties(tt.prefix, tt.messages) + + // Check number of event properties created + assert.Equal(t, len(tt.expectedEventContents), len(result), "incorrect number of event properties") + + if len(tt.messages) == 0 { + return // No more checks needed for empty input + } + + // Verify event properties + for i, prop := range result { + // Check property name format + expectedName := fmt.Sprintf("%s-%d", tt.prefix, i+1) + assert.Equal(t, expectedName, prop.Name, "incorrect property name") + + // Check that value is string + propValue, ok := prop.Value.(string) + assert.True(t, ok, "property value should be string") + + // Check size + if len(tt.messages[i]) > maxPropertyCharacterLength && len(propValue) != maxPropertyCharacterLength { + assert.Less(t, len(propValue), maxPropertyCharacterLength, "property value exceeds maxPropertyCharacterLength at %d", 
len(propValue)) + } + } + }) + } +}