Remove local uwsgi instrumentation and local development tempo and agent

This commit is contained in:
Ildar Iskhakov 2023-01-04 22:25:17 +08:00
parent 8e61d0fde0
commit 15256bc4cf
7 changed files with 14 additions and 159 deletions

View file

@ -70,7 +70,6 @@ The possible profiles values are:
- `rabbitmq`
- `postgres`
- `mysql`
- `tracing`
The default is `engine,oncall_ui,redis,grafana`. This runs:
@ -174,13 +173,6 @@ export DRONE_TOKEN=<Your DRONE_TOKEN>
drone sign --save grafana/oncall .drone.yml
```
## Tracing setup
Run these steps to enable tracing in your local deployment
1. Add `tracing` to COMPOSE_PROFILES variable (more in [`COMPOSE_PROFILES`](#compose_profiles))
2. Start the application and check tracing spans at [Grafana Explore Tab](http://localhost:3000/explore), datasource Tempo
## Troubleshooting
### ld: library not found for -lssl

View file

@ -1,16 +0,0 @@
server:
log_level: debug
traces:
configs:
- name: default
receivers:
otlp:
protocols:
grpc:
remote_write:
- endpoint: tempo:4317
insecure: true
batch:
timeout: 5s
send_batch_size: 100

View file

@ -1,30 +0,0 @@
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
uid: prometheus
access: proxy
orgId: 1
url: http://prometheus:9090
basicAuth: false
isDefault: false
version: 1
editable: false
jsonData:
httpMethod: GET
- name: Tempo
type: tempo
access: proxy
orgId: 1
url: http://tempo:3200
basicAuth: false
isDefault: true
version: 1
editable: false
apiVersion: 1
uid: tempo
jsonData:
httpMethod: GET
serviceMap:
datasourceUid: prometheus

View file

@ -1,62 +0,0 @@
search_enabled: true
metrics_generator_enabled: true
server:
http_listen_port: 3200
distributor:
receivers: # this configuration will listen on all ports and protocols that tempo is capable of.
jaeger: # the receivers all come from the OpenTelemetry collector. more configuration information can
protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver
thrift_http: #
grpc: # for a production deployment you should only enable the receivers you need!
thrift_binary:
thrift_compact:
zipkin:
otlp:
protocols:
http:
grpc:
opencensus:
ingester:
trace_idle_period: 10s # the length of time after a trace has not received spans to consider it complete and flush it
max_block_bytes: 1_000_000 # cut the head block when it hits this size or ...
max_block_duration: 5m # this much time passes
compactor:
compaction:
compaction_window: 1h # blocks in this time window will be compacted together
max_block_bytes: 100_000_000 # maximum size of compacted blocks
block_retention: 1h
compacted_block_retention: 10m
metrics_generator:
registry:
external_labels:
source: tempo
cluster: docker-compose
storage:
path: /tmp/tempo/generator/wal
remote_write:
- url: http://prometheus:9090/api/v1/write
send_exemplars: true
storage:
trace:
backend: local # backend configuration to use
block:
bloom_filter_false_positive: .05 # bloom filter false positive rate. lower values create larger filters but fewer false positives
index_downsample_bytes: 1000 # number of bytes per index record
encoding: zstd # block encoding/compression. options: none, gzip, lz4-64k, lz4-256k, lz4-1M, lz4, snappy, zstd, s2
wal:
path: /tmp/tempo/wal # where to store the wal locally
encoding: snappy # wal encoding/compression. options: none, gzip, lz4-64k, lz4-256k, lz4-1M, lz4, snappy, zstd, s2
local:
path: /tmp/tempo/blocks
pool:
max_workers: 100 # worker pool determines the number of parallel requests to the object store backend
queue_depth: 10000
overrides:
metrics_generator_processors: [service-graphs, span-metrics]

View file

@ -25,9 +25,6 @@ x-env-vars: &oncall-env-vars
BROKER_TYPE: ${BROKER_TYPE}
GRAFANA_API_URL: http://localhost:3000
GOOGLE_APPLICATION_CREDENTIALS: /etc/app/gcp_service_account.json
OTEL_TRACING_ENABLED: True
OTEL_SERVICE_NAME: oncall
OTEL_EXPORTER_OTLP_ENDPOINT: http://localhost:4317
# basically this is needed because the oncall backend containers have been configured to communicate w/ grafana via
# http://localhost:3000 (GRAFANA_API_URL). This URL is used in two scenarios:
@ -298,7 +295,6 @@ services:
volumes:
- grafanadata_dev:/var/lib/grafana
- ./grafana-plugin:/var/lib/grafana/plugins/grafana-plugin
- ./dev/conf/grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
depends_on:
postgres:
condition: service_healthy
@ -306,34 +302,6 @@ services:
condition: service_healthy
profiles:
- grafana
agent:
image: grafana/agent:v0.27.1
volumes:
- ./dev/conf/agent.yaml:/etc/agent.yaml
entrypoint:
- /bin/agent
- -config.file=/etc/agent.yaml
ports:
- "4317:4317"
profiles:
- tracing
tempo:
image: grafana/tempo:latest
command: [ "-config.file=/etc/tempo.yaml" ]
volumes:
- ./dev/conf/tempo-local.yaml:/etc/tempo.yaml
- tempodata_dev:/tmp/tempo
ports:
- "14268" # jaeger ingest
- "3200" # tempo
- "4317" # otlp grpc
- "4318" # otlp http
- "9411" # zipkin
profiles:
- tracing
volumes:
redisdata_dev:
labels: *oncall-labels
@ -345,8 +313,6 @@ volumes:
labels: *oncall-labels
mysqldata_dev:
labels: *oncall-labels
tempodata_dev:
labels: *oncall-labels
networks:
default:

View file

@ -17,7 +17,6 @@ from opentelemetry.instrumentation.pymysql import PyMySQLInstrumentor
from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from uwsgidecorators import postfork
from whitenoise import WhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.prod")
@ -26,11 +25,18 @@ application = get_wsgi_application()
application = WhiteNoise(application)
if settings.OTEL_TRACING_ENABLED and settings.OTEL_EXPORTER_OTLP_ENDPOINT:
application = OpenTelemetryMiddleware(application)
try:
from uwsgidecorators import postfork
@postfork
def init_tracing():
trace.set_tracer_provider(TracerProvider())
span_processor = BatchSpanProcessor(OTLPSpanExporter())
trace.get_tracer_provider().add_span_processor(span_processor)
PyMySQLInstrumentor().instrument()
application = OpenTelemetryMiddleware(application)
@postfork
def init_tracing():
trace.set_tracer_provider(TracerProvider())
span_processor = BatchSpanProcessor(OTLPSpanExporter())
trace.get_tracer_provider().add_span_processor(span_processor)
PyMySQLInstrumentor().instrument()
except ModuleNotFoundError:
# Only works under uwsgi web server environment
pass

View file

@ -45,4 +45,3 @@ opentelemetry-instrumentation-celery==0.36b0
opentelemetry-instrumentation-pymysql==0.36b0
opentelemetry-instrumentation-wsgi==0.36b0
opentelemetry-exporter-otlp-proto-grpc==1.15.0
uwsgidecorators==1.1.0