diff --git a/.github/workflows/docker-chatbot-rag-app.yml b/.github/workflows/docker-chatbot-rag-app.yml
index 9a5c2752..70d61bcd 100644
--- a/.github/workflows/docker-chatbot-rag-app.yml
+++ b/.github/workflows/docker-chatbot-rag-app.yml
@@ -29,11 +29,12 @@ env:
 jobs:
   build-image:
+    timeout-minutes: 90  # instead of the default 360
     strategy:
       matrix:
         runner:
           - ubuntu-24.04
-          - ubuntu-24.04-arm
+          - ubuntu-24.04-arm  # we don't test on this runner, as it times out in CI
     runs-on: ${{ matrix.runner }}
     steps:
       - uses: actions/checkout@v4
@@ -55,10 +56,10 @@ jobs:
           cache-from: type=gha
           cache-to: type=gha,mode=max
       - name: start elasticsearch
-        if: github.event_name == 'pull_request'
+        if: github.event_name == 'pull_request' && matrix.runner == 'ubuntu-24.04'
         run: docker compose -f docker/docker-compose-elastic.yml up --quiet-pull -d --wait --wait-timeout 120 elasticsearch
       - name: test image
-        if: github.event_name == 'pull_request'
+        if: github.event_name == 'pull_request' && matrix.runner == 'ubuntu-24.04'
         working-directory: example-apps/chatbot-rag-app
         run: |  # This tests ELSER is working, which doesn't require an LLM.
           cp env.example .env
diff --git a/docker/README.md b/docker/README.md
index 41824c62..82341c72 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -1,8 +1,10 @@
 # Running your own Elastic Stack with Docker
 
-If you'd like to start Elastic locally, you can use the provided
-[docker-compose-elastic.yml](docker-compose-elastic.yml) file. This starts
-Elasticsearch, Kibana, and APM Server and only requires Docker installed.
+If you would like to start a local Elastic Stack with Docker, use
+[docker-compose-elastic.yml](docker-compose-elastic.yml).
+
+This starts Elasticsearch, Kibana, and the Elastic Distribution of
+OpenTelemetry (EDOT) Collector.
 
 Note: If you haven't checked out this repository, all you need is one file:
 ```bash
@@ -26,3 +28,21 @@ Clean up when finished, like this:
 ```bash
 docker compose -f docker-compose-elastic.yml down
 ```
+
+## OpenTelemetry
+
+### Metrics
+
+If your application only sends logs or traces, you can skip this section.
+
+EDOT Collector supports delta, not cumulative, metrics. Applications sending
+OpenTelemetry metrics with the official OTEL SDK need to export this variable:
+```bash
+OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE=delta
+```
+
+Alternatively, you can use [EDOT language SDKs][edot-sdks], which set this by
+default.
+
+---
+[edot-sdks]: https://github.com/elastic/opentelemetry?tab=readme-ov-file#edot-sdks--agents
diff --git a/docker/docker-compose-elastic.yml b/docker/docker-compose-elastic.yml
index 9dbacf8a..1f2e92c8 100644
--- a/docker/docker-compose-elastic.yml
+++ b/docker/docker-compose-elastic.yml
@@ -1,8 +1,69 @@
+# This is a Docker Compose file that runs a local Elastic Stack composed of
+# Elasticsearch, Kibana, and the Elastic Distribution of OpenTelemetry
+# (EDOT) Collector.
 name: elastic-stack
 
+configs:
+  # This is the minimal YAML configuration needed to listen on all interfaces
+  # for OTLP logs, metrics and traces, exporting to Elasticsearch.
+  edot-collector-config:
+    content: |
+      receivers:
+        otlp:
+          protocols:
+            grpc:
+              endpoint: 0.0.0.0:4317
+            http:
+              endpoint: 0.0.0.0:4318
+
+      connectors:
+        elasticapm:
+
+      processors:
+        elastictrace:
+
+      exporters:
+        elasticsearch:
+          endpoint: http://elasticsearch:9200
+          user: elastic
+          password: elastic
+          mapping:
+            mode: otel
+          logs_dynamic_index:
+            enabled: true
+          metrics_dynamic_index:
+            enabled: true
+          traces_dynamic_index:
+            enabled: true
+          flush:
+            bytes: 1048576 # apm-server default instead of 5000000
+            interval: 1s # apm-server default instead of 30s
+
+      service:
+        pipelines:
+          traces:
+            receivers: [otlp]
+            processors: [elastictrace]
+            exporters: [elasticapm, elasticsearch]
+
+          metrics:
+            receivers: [otlp]
+            processors: []
+            exporters: [elasticsearch]
+
+          metrics/aggregated:
+            receivers: [elasticapm]
+            processors: []
+            exporters: [elasticsearch]
+
+          logs:
+            receivers: [otlp]
+            processors: []
+            exporters: [elasticapm, elasticsearch]
+
 services:
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:8.17.2
+    image: docker.elastic.co/elasticsearch/elasticsearch:9.0.0
     container_name: elasticsearch
     ports:
       - 9200:9200
@@ -38,7 +99,7 @@ services:
     depends_on:
       elasticsearch:
         condition: service_healthy
-    image: docker.elastic.co/elasticsearch/elasticsearch:8.17.2
+    image: docker.elastic.co/elasticsearch/elasticsearch:9.0.0
     container_name: elasticsearch_settings
     restart: 'no'
     # gen-ai assistants in kibana save state in a way that requires system
@@ -53,7 +114,7 @@ services:
       '
 
   kibana:
-    image: docker.elastic.co/kibana/kibana:8.17.2
+    image: docker.elastic.co/kibana/kibana:9.0.0
     container_name: kibana
     depends_on:
       elasticsearch_settings:
@@ -76,27 +137,22 @@ services:
       retries: 300
       interval: 1s
 
-  apm-server:
-    image: docker.elastic.co/apm/apm-server:8.17.2
-    container_name: apm-server
+  otel-collector:
+    image: docker.elastic.co/elastic-agent/elastic-otel-collector:9.0.0
+    container_name: otel-collector
     depends_on:
       elasticsearch:
         condition: service_healthy
-    command: >
-      apm-server
-      -E apm-server.kibana.enabled=true
-      -E apm-server.kibana.host=http://kibana:5601
-      -E apm-server.kibana.username=elastic
-      -E apm-server.kibana.password=elastic
-      -E output.elasticsearch.hosts=["http://elasticsearch:9200"]
-      -E output.elasticsearch.username=elastic
-      -E output.elasticsearch.password=elastic
-    cap_add: ["CHOWN", "DAC_OVERRIDE", "SETGID", "SETUID"]
-    cap_drop: ["ALL"]
+    command: [
+      "--config=/etc/otelcol-contrib/config.yaml",
+    ]
+    configs:
+      - source: edot-collector-config
+        target: /etc/otelcol-contrib/config.yaml
     ports:
-      - 8200:8200
+      - "4317:4317" # grpc
+      - "4318:4318" # http
     healthcheck:
-      test: ["CMD-SHELL", "bash -c 'echo -n > /dev/tcp/127.0.0.1/8200'"]
+      test: ["CMD-SHELL", "bash -c 'echo -n > /dev/tcp/127.0.0.1/4317'"]
       retries: 300
-      interval: 1s
-
+      interval: 1s
\ No newline at end of file
diff --git a/example-apps/chatbot-rag-app/README.md b/example-apps/chatbot-rag-app/README.md
index 2f0872f4..1e3512f5 100644
--- a/example-apps/chatbot-rag-app/README.md
+++ b/example-apps/chatbot-rag-app/README.md
@@ -22,8 +22,9 @@ Copy [env.example](env.example) to `.env` and fill in values noted inside.
 ## Installing and connecting to Elasticsearch
 
 There are a number of ways to install Elasticsearch. Cloud is best for most
-use-cases. We also have [docker-compose-elastic.yml][docker-compose-elastic],
-that starts Elasticsearch, Kibana, and APM Server on your laptop in one step.
+use-cases. We also have [docker-compose-elastic.yml][docker-compose],
+that starts Elasticsearch, Kibana, and the Elastic Distribution of
+OpenTelemetry (EDOT) Collector on your laptop in one step.
 
 Once you decided your approach, edit your `.env` file accordingly.
 
@@ -84,8 +85,8 @@ copied to a file name `.env` and updated with `ELASTICSEARCH_URL` and
 For example, if you started your Elastic Stack with
 [k8s-manifest-elastic.yml][k8s-manifest-elastic], you would update these values:
 ```
-ELASTICSEARCH_URL=http://elasticsearch:9200
-OTEL_EXPORTER_OTLP_ENDPOINT=http://apm-server:8200
+ELASTICSEARCH_URL=http://elasticsearch.default.svc:9200
+OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector.default.svc:4318
 ```
 
 Then, import your `.env` file as a configmap like this:
@@ -132,6 +133,7 @@ kubectl port-forward deployment.apps/chatbot-rag-app 4000:4000 &
 Clean up when finished, like this:
 ```bash
 kubectl delete -f k8s-manifest.yml
+kubectl delete configmap chatbot-rag-app-env
 ```
 
 ### Run with Python
@@ -197,16 +199,16 @@ prefix `python` with `opentelemetry-instrument` to enable OpenTelemetry.
 dotenv run -- opentelemetry-instrument python api/app.py
 ```
 
-[env.example](env.example) defaults to use Elastic APM server, started by
-[docker-compose-elastic.yml](../../docker). If you start your Elastic stack
-this way, you can access Kibana like this, authenticating with the username
+[env.example](env.example) defaults to using an OpenTelemetry Collector,
+specifically the Elastic Distribution of OpenTelemetry (EDOT) Collector, if you
+started your Elastic Stack with [docker-compose-elastic.yml][docker-compose].
+If so, you can access Kibana like this, authenticating with the username
 "elastic" and password "elastic":
 
 http://localhost:5601/app/apm/traces?rangeFrom=now-15m&rangeTo=now
 
-Under the scenes, chatbot-rag-app is automatically instrumented by the Elastic
-Distribution of OpenTelemetry (EDOT) Python. You can see more details about
-EDOT Python [here](https://github.com/elastic/elastic-otel-python).
+Under the hood, chatbot-rag-app is automatically instrumented by EDOT Python.
+You can see more details about EDOT Python [here][edot-python].
 
 OpenTelemetry support for LLM providers not included in EDOT Python are
 provided by the [Langtrace Python SDK](https://docs.langtrace.ai/sdk/python_sdk).
@@ -260,5 +262,6 @@ docker compose up --build --force-recreate
 ---
 [loader-docs]: https://python.langchain.com/docs/how_to/#document-loaders
 [install-es]: https://www.elastic.co/search-labs/tutorials/install-elasticsearch
-[docker-compose-elastic]: ../../docker/docker-compose-elastic.yml
+[docker-compose]: ../../docker/docker-compose-elastic.yml
+[edot-python]: https://github.com/elastic/elastic-otel-python
 [k8s-manifest-elastic]: ../../k8s/k8s-manifest-elastic.yml
diff --git a/example-apps/chatbot-rag-app/env.example b/example-apps/chatbot-rag-app/env.example
index 1902064e..74974315 100644
--- a/example-apps/chatbot-rag-app/env.example
+++ b/example-apps/chatbot-rag-app/env.example
@@ -72,12 +72,13 @@ OTEL_SDK_DISABLED=true
 # Assign the service name that shows up in Kibana
 OTEL_SERVICE_NAME=chatbot-rag-app
 
-# Default to send logs, traces and metrics to an Elastic APM server accessible
-# via localhost.
+# Defaults to sending logs, traces and metrics to an OpenTelemetry collector
+# accessible via localhost, for example, the Elastic Distribution of
+# OpenTelemetry (EDOT) Collector.
 #
-# When running inside Kubernetes, set to http://elasticsearch.default.svc:9200
+# When running inside Kubernetes, set to http://otel-collector.default.svc:4318
 # or similar.
-OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:8200
+OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318
 OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf
 
 # Change to 'false' to hide prompt and completion content
diff --git a/example-apps/chatbot-rag-app/requirements.txt b/example-apps/chatbot-rag-app/requirements.txt
index 3acfe21d..4acb569e 100644
--- a/example-apps/chatbot-rag-app/requirements.txt
+++ b/example-apps/chatbot-rag-app/requirements.txt
@@ -6,7 +6,7 @@
 #
 aiohappyeyeballs==2.6.1
     # via aiohttp
-aiohttp==3.11.16
+aiohttp==3.11.18
    # via langchain-community
 aiosignal==1.3.2
     # via aiohttp
@@ -20,11 +20,11 @@ attrs==25.3.0
     # via aiohttp
 blinker==1.9.0
     # via flask
-boto3==1.37.28
+boto3==1.38.0
     # via
     #   langchain-aws
     #   langtrace-python-sdk
-botocore==1.37.28
+botocore==1.38.0
     # via
     #   boto3
     #   s3transfer
@@ -41,7 +41,7 @@ charset-normalizer==3.4.1
     # via requests
 click==8.1.8
     # via flask
-cohere==5.14.2
+cohere==5.15.0
     # via langchain-cohere
 colorama==0.4.6
     # via
@@ -64,7 +64,7 @@ elastic-opentelemetry==1.0.0
     # via -r requirements.in
 elastic-transport==8.17.1
     # via elasticsearch
-elasticsearch[vectorstore-mmr]==8.17.2
+elasticsearch[vectorstore-mmr]==8.18.0
     # via
     #   -r requirements.in
     #   langchain-elasticsearch
@@ -80,7 +80,7 @@ flask==3.1.0
     #   flask-cors
 flask-cors==5.0.1
     # via -r requirements.in
-frozenlist==1.5.0
+frozenlist==1.6.0
     # via
     #   aiohttp
     #   aiosignal
@@ -95,7 +95,7 @@ google-api-core[grpc]==2.24.2
     #   google-cloud-core
     #   google-cloud-resource-manager
     #   google-cloud-storage
-google-auth==2.38.0
+google-auth==2.39.0
     # via
     #   google-api-core
     #   google-cloud-aiplatform
@@ -103,7 +103,7 @@ google-auth==2.38.0
     #   google-cloud-core
     #   google-cloud-resource-manager
     #   google-cloud-storage
-google-cloud-aiplatform==1.87.0
+google-cloud-aiplatform==1.90.0
     # via langchain-google-vertexai
 google-cloud-bigquery==3.31.0
     # via google-cloud-aiplatform
@@ -125,7 +125,7 @@ google-resumable-media==2.7.2
     # via
     #   google-cloud-bigquery
     #   google-cloud-storage
-googleapis-common-protos[grpc]==1.69.2
+googleapis-common-protos[grpc]==1.70.0
     # via
     #   google-api-core
     #   grpc-google-iam-v1
@@ -134,7 +134,7 @@ googleapis-common-protos[grpc]==1.69.2
     #   opentelemetry-exporter-otlp-proto-http
 grpc-google-iam-v1==0.14.2
     # via google-cloud-resource-manager
-grpcio==1.71.0
+grpcio==1.72.0rc1
     # via
     #   google-api-core
     #   googleapis-common-protos
@@ -147,7 +147,7 @@ h11==0.14.0
     # via httpcore
 halo==0.0.31
     # via -r requirements.in
-httpcore==1.0.7
+httpcore==1.0.8
     # via httpx
 httpx==0.28.1
     # via
@@ -162,7 +162,7 @@ httpx-sse==0.4.0
     #   langchain-community
     #   langchain-google-vertexai
     #   langchain-mistralai
-huggingface-hub==0.30.1
+huggingface-hub==0.30.2
     # via
     #   tokenizers
     #   transformers
@@ -188,17 +188,17 @@ jsonpatch==1.33
     # via langchain-core
 jsonpointer==3.0.0
     # via jsonpatch
-langchain==0.3.23
+langchain==0.3.24
     # via
     #   -r requirements.in
     #   langchain-community
-langchain-aws==0.2.18
+langchain-aws==0.2.21
     # via -r requirements.in
-langchain-cohere==0.4.3
+langchain-cohere==0.4.4
     # via -r requirements.in
-langchain-community==0.3.21
+langchain-community==0.3.22
     # via langchain-cohere
-langchain-core==0.3.51
+langchain-core==0.3.55
     # via
     #   langchain
     #   langchain-aws
@@ -211,20 +211,20 @@ langchain-core==0.3.51
     #   langchain-text-splitters
 langchain-elasticsearch==0.3.2
     # via -r requirements.in
-langchain-google-vertexai==2.0.19
+langchain-google-vertexai==2.0.20
     # via -r requirements.in
 langchain-mistralai==0.2.10
     # via -r requirements.in
-langchain-openai==0.3.12
+langchain-openai==0.3.14
     # via -r requirements.in
 langchain-text-splitters==0.3.8
     # via langchain
-langsmith==0.3.24
+langsmith==0.3.33
     # via
     #   langchain
     #   langchain-community
     #   langchain-core
-langtrace-python-sdk==3.8.11
+langtrace-python-sdk==3.8.15
     # via -r requirements.in
 log-symbols==0.0.14
     # via halo
@@ -234,20 +234,20 @@ markupsafe==3.0.2
     #   werkzeug
 marshmallow==3.26.1
     # via dataclasses-json
-multidict==6.3.2
+multidict==6.4.3
     # via
     #   aiohttp
     #   yarl
-mypy-extensions==1.0.0
+mypy-extensions==1.1.0
     # via typing-inspect
-numpy==2.2.4
+numpy==2.2.5
     # via
     #   elasticsearch
     #   langchain-aws
     #   langchain-community
     #   shapely
     #   transformers
-openai==1.70.0
+openai==1.75.0
     # via langchain-openai
 opentelemetry-api==1.31.1
     # via
@@ -352,7 +352,7 @@ pyasn1==0.6.1
     #   rsa
 pyasn1-modules==0.4.2
     # via google-auth
-pydantic==2.11.2
+pydantic==2.11.3
     # via
     #   cohere
     #   google-cloud-aiplatform
@@ -370,11 +370,12 @@ pydantic-core==2.33.1
     # via
     #   cohere
     #   pydantic
-pydantic-settings==2.8.1
+pydantic-settings==2.9.1
     # via langchain-community
 python-dateutil==2.9.0.post0
     # via
     #   botocore
+    #   elasticsearch
     #   google-cloud-bigquery
 python-dotenv==1.1.0
     # via pydantic-settings
@@ -406,13 +407,13 @@ requests==2.32.3
     #   transformers
 requests-toolbelt==1.0.0
     # via langsmith
-rsa==4.9
+rsa==4.9.1
     # via google-auth
-s3transfer==0.11.4
+s3transfer==0.12.0
     # via boto3
 safetensors==0.5.3
     # via transformers
-sentry-sdk==2.25.1
+sentry-sdk==2.26.1
     # via langtrace-python-sdk
 shapely==2.1.0
     # via google-cloud-aiplatform
@@ -456,16 +457,17 @@ tqdm==4.67.1
     #   transformers
 trace-attributes==7.2.1
     # via langtrace-python-sdk
-transformers==4.51.0
+transformers==4.51.3
     # via langtrace-python-sdk
 types-pyyaml==6.0.12.20250402
     # via langchain-cohere
 types-requests==2.32.0.20250328
     # via cohere
-typing-extensions==4.13.1
+typing-extensions==4.13.2
     # via
     #   anyio
     #   cohere
+    #   elasticsearch
     #   google-cloud-aiplatform
     #   huggingface-hub
     #   langchain-core
@@ -480,10 +482,12 @@ typing-extensions==4.13.1
 typing-inspect==0.9.0
     # via dataclasses-json
 typing-inspection==0.4.0
-    # via pydantic
+    # via
+    #   pydantic
+    #   pydantic-settings
 ujson==5.10.0
     # via langtrace-python-sdk
-urllib3==2.3.0
+urllib3==2.4.0
     # via
     #   botocore
     #   elastic-transport
@@ -501,7 +505,7 @@ wrapt==1.17.2
     #   deprecated
     #   opentelemetry-instrumentation
     #   opentelemetry-instrumentation-sqlalchemy
-yarl==1.19.0
+yarl==1.20.0
     # via aiohttp
 zipp==3.21.0
     # via importlib-metadata
diff --git a/k8s/README.md b/k8s/README.md
index 93957a8e..2a8a078d 100644
--- a/k8s/README.md
+++ b/k8s/README.md
@@ -1,8 +1,10 @@
 # Running your own Elastic Stack with Kubernetes
 
-If you'd like to start Elastic with Kubernetes, you can use the provided
-[manifest-elastic.yml](manifest-elastic.yml) file. This starts
-Elasticsearch, Kibana, and APM Server in an existing Kubernetes cluster.
+If you would like to start a local Elastic Stack with Kubernetes, use
+[k8s-manifest-elastic.yml](k8s-manifest-elastic.yml).
+
+This starts Elasticsearch, Kibana, and the Elastic Distribution of
+OpenTelemetry (EDOT) Collector.
 
 Note: If you haven't checked out this repository, all you need is one file:
 ```bash
@@ -28,7 +30,7 @@ Elastic Stack version can take a long time due to image pulling.
 kubectl wait --for=condition=available --timeout=10m \
   deployment/elasticsearch \
   deployment/kibana \
-  deployment/apm-server
+  deployment/otel-collector
 ```
 
 Next, forward the kibana port:
@@ -45,3 +47,21 @@ Clean up when finished, like this:
 ```bash
 kubectl delete -f k8s-manifest-elastic.yml
 ```
+
+## OpenTelemetry
+
+### Metrics
+
+If your application only sends logs or traces, you can skip this section.
+
+EDOT Collector supports delta, not cumulative, metrics. Applications sending
+OpenTelemetry metrics with the official OTEL SDK need to export this variable:
+```bash
+OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE=delta
+```
+
+Alternatively, you can use [EDOT language SDKs][edot-sdks], which set this by
+default.
+
+---
+[edot-sdks]: https://github.com/elastic/opentelemetry?tab=readme-ov-file#edot-sdks--agents
diff --git a/k8s/k8s-manifest-elastic.yml b/k8s/k8s-manifest-elastic.yml
index 41fb1687..6801c361 100644
--- a/k8s/k8s-manifest-elastic.yml
+++ b/k8s/k8s-manifest-elastic.yml
@@ -1,10 +1,14 @@
-# This is a simple k8s manifest to start Elasticsearch, Kibana and APM server
-# with the same configuration as ../docker/docker-compose-elastic.yml
+# This is a Kubernetes manifest that runs a local Elastic Stack composed of
+# Elasticsearch, Kibana, and the Elastic Distribution of OpenTelemetry
+# (EDOT) Collector.
+#
+# This has the same configuration as ../docker/docker-compose-elastic.yml
 #
 # For this reason, if trying to understand why a setting exists, look at the
 # docker variant first. Similarly, updates to the docker variant should happen
 # here as well.
 
+---
 apiVersion: v1
 kind: Service
 metadata:
@@ -33,7 +37,7 @@ spec:
     spec:
       containers:
       - name: elasticsearch
-        image: docker.elastic.co/elasticsearch/elasticsearch:8.17.2
+        image: docker.elastic.co/elasticsearch/elasticsearch:9.0.0
         ports:
         - containerPort: 9200
         env:
@@ -56,7 +60,7 @@ spec:
         - name: xpack.license.self_generated.type
           value: trial
         # Note that ELSER is recommended to have 2GB, but it is JNI (PyTorch).
-        # ELSER's memory is in addition to the heap and other overhead.
+        # So, ELSER's memory is in addition to the heap and other overhead.
         - name: ES_JAVA_OPTS
           value: "-Xms2g -Xmx2g"
         securityContext:
@@ -65,8 +69,9 @@ spec:
             drop: ["ALL"]
         readinessProbe:
           exec:
+            # Readiness probe adapted from the Docker healthcheck script.
             command: ["sh", "-c", "curl --max-time 1 -s http://localhost:9200 | grep -q 'missing authentication credentials'"]
-          initialDelaySeconds: 5
+          initialDelaySeconds: 10
           periodSeconds: 1
           timeoutSeconds: 10
           failureThreshold: 120
@@ -102,7 +107,7 @@ spec:
       # gen-ai assistants in kibana save state in a way that requires system
       # access, so set kibana_system's password to a known value.
       - name: setup-kibana-system-user
-        image: docker.elastic.co/elasticsearch/elasticsearch:8.17.2
+        image: docker.elastic.co/elasticsearch/elasticsearch:9.0.0
         command:
         - bash
         - -c
@@ -114,7 +119,7 @@ spec:
             -H "Content-Type: application/json" | grep -q "^{}"; do sleep 5; done;
       containers:
       - name: kibana
-        image: docker.elastic.co/kibana/kibana:8.17.2
+        image: docker.elastic.co/kibana/kibana:9.0.0
         ports:
         - containerPort: 5601
         env:
@@ -126,6 +131,8 @@ spec:
           value: kibana_system
         - name: ELASTICSEARCH_PASSWORD
           value: elastic
+        # Non-default settings from here:
+        # https://github.com/elastic/apm-server/blob/main/testing/docker/kibana/kibana.yml
         - name: MONITORING_UI_CONTAINER_ELASTICSEARCH_ENABLED
           value: "true"
         - name: XPACK_SECURITY_ENCRYPTIONKEY
@@ -143,70 +150,115 @@ spec:
           periodSeconds: 1
           failureThreshold: 300
 
+---
+# This is the minimal YAML configuration needed to listen on all interfaces for
+# OTLP logs, metrics and traces, exporting to Elasticsearch.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: edot-collector-config
+data:
+  config.yaml: |
+    receivers:
+      otlp:
+        protocols:
+          grpc:
+            endpoint: 0.0.0.0:4317
+          http:
+            endpoint: 0.0.0.0:4318
+    connectors:
+      elasticapm:
+    processors:
+      elastictrace:
+    exporters:
+      elasticsearch:
+        endpoint: http://elasticsearch.default.svc:9200
+        user: elastic
+        password: elastic
+        mapping:
+          mode: otel
+        logs_dynamic_index:
+          enabled: true
+        metrics_dynamic_index:
+          enabled: true
+        traces_dynamic_index:
+          enabled: true
+        flush:
+          bytes: 1048576 # apm-server default instead of 5000000
+          interval: 1s # apm-server default instead of 30s
+    service:
+      pipelines:
+        traces:
+          receivers: [otlp]
+          processors: [elastictrace]
+          exporters: [elasticapm, elasticsearch]
+        metrics:
+          receivers: [otlp]
+          processors: []
+          exporters: [elasticsearch]
+        metrics/aggregated:
+          receivers: [elasticapm]
+          processors: []
+          exporters: [elasticsearch]
+        logs:
+          receivers: [otlp]
+          processors: []
+          exporters: [elasticapm, elasticsearch]
+
 ---
 apiVersion: v1
 kind: Service
 metadata:
-  name: apm-server
+  name: otel-collector
 spec:
   ports:
-  - port: 8200
-    targetPort: 8200
+  - name: grpc
+    port: 4317
+    targetPort: 4317
+  - name: http
+    port: 4318
+    targetPort: 4318
   selector:
-    app: apm-server
+    app: otel-collector
 
 ---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: apm-server
+  name: otel-collector
 spec:
   replicas: 1
   selector:
     matchLabels:
-      app: apm-server
+      app: otel-collector
   template:
     metadata:
       labels:
-        app: apm-server
+        app: otel-collector
     spec:
-      initContainers:
-      - name: await-kibana
-        image: docker.elastic.co/elasticsearch/elasticsearch:8.17.2
-        command:
-        - bash
-        - -xc
-        - |
-          echo "Waiting for kibana to be available";
-          until curl --max-time 1 -s http://kibana.default.svc:5601/api/status | grep -q 'available'; do sleep 1; done;
       containers:
-      - name: apm-server
-        image: docker.elastic.co/apm/apm-server:8.17.2
-        command:
-        - apm-server
-        - -E
-        - apm-server.kibana.enabled=true
-        - -E
-        - apm-server.kibana.host=http://kibana.default.svc:5601
-        - -E
-        - apm-server.kibana.username=elastic
-        - -E
-        - apm-server.kibana.password=elastic
-        - -E
-        - output.elasticsearch.hosts=["http://elasticsearch.default.svc:9200"]
-        - -E
-        - output.elasticsearch.username=elastic
-        - -E
-        - output.elasticsearch.password=elastic
+      - name: otel-collector
+        image: docker.elastic.co/elastic-agent/elastic-otel-collector:9.0.0
+        command: ["/usr/bin/tini", "--"]
+        args: ["/usr/local/bin/docker-entrypoint", "--config=/usr/share/elastic-agent/config.yaml"]
+        volumeMounts:
+        - name: config
+          mountPath: /usr/share/elastic-agent/config.yaml
+          subPath: config.yaml
         ports:
-        - containerPort: 8200
+        - containerPort: 4317
+        - containerPort: 4318
         readinessProbe:
           tcpSocket:
-            port: 8200
+            port: 4317
           initialDelaySeconds: 1
           periodSeconds: 1
           failureThreshold: 300
         securityContext:
           capabilities:
-            add: ["CHOWN", "DAC_OVERRIDE", "SETGID", "SETUID"]
-            drop: ["ALL"]
+            add: [ "CHOWN", "DAC_OVERRIDE", "SETGID", "SETUID" ]
+            drop: [ "ALL" ]
+      volumes:
+      - name: config
+        configMap:
+          name: edot-collector-config
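
Both Metrics sections added above note that EDOT Collector accepts only delta temporality for OpenTelemetry metrics. For an application that wires up the official OpenTelemetry Python SDK by hand (rather than an EDOT language SDK), the following is a minimal sketch of what setting `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE=delta` does in practice. It assumes the `opentelemetry-sdk` and `opentelemetry-exporter-otlp-proto-http` packages are installed and that the collector from `docker-compose-elastic.yml` is listening on `localhost:4318`; the meter and counter names are illustrative, not part of this change.

```python
import os

from opentelemetry import metrics
from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader

# The OTLP metric exporter reads this env var when it is constructed; "delta"
# switches it from the default cumulative temporality, which is what the EDOT
# Collector's Elasticsearch exporter expects.
os.environ["OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE"] = "delta"

# OTLP/HTTP endpoint of the otel-collector started above; when the endpoint is
# passed in code, the HTTP exporter expects the full /v1/metrics signal path.
exporter = OTLPMetricExporter(endpoint="http://localhost:4318/v1/metrics")
metrics.set_meter_provider(
    MeterProvider(metric_readers=[PeriodicExportingMetricReader(exporter)])
)

# Illustrative instrument: values recorded here are exported as deltas.
counter = metrics.get_meter("example-app").create_counter("requests")
counter.add(1)
```

Metrics sent this way land in Elasticsearch through the `metrics` pipeline of the collector configuration defined in the compose file and Kubernetes manifest above.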