Mirror of https://github.com/roostorg/osprey
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at dca6bada9d5beea2eeea6a7fa71857e16cef2c05 356 lines 9.8 kB view raw
# Volumes for druid purposes only
volumes:
  metadata_data: {}
  middle_var: {}
  historical_var: {}
  broker_var: {}
  coordinator_var: {}
  router_var: {}
  druid_shared: {}
  minio_data: {}

services:
  # Single-node Kafka in KRaft mode (broker + controller in one process).
  kafka:
    image: confluentinc/cp-kafka:7.4.0
    hostname: kafka
    container_name: kafka
    ports:
      - "9092:9092"
    environment:
      KAFKA_NODE_ID: 1
      KAFKA_PROCESS_ROLES: "broker,controller"
      KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka:29093"
      KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
      KAFKA_INTER_BROKER_LISTENER_NAME: "INTERNAL"
      # INTERNAL is for in-network clients, EXTERNAL is forwarded to the host.
      KAFKA_LISTENERS: "INTERNAL://kafka:29092,EXTERNAL://0.0.0.0:9092,CONTROLLER://kafka:29093"
      KAFKA_ADVERTISED_LISTENERS: "INTERNAL://kafka:29092,EXTERNAL://localhost:9092"
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT"
      # Replication factor 1 everywhere: single-broker dev cluster.
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      CLUSTER_ID: "P45WxmmWSe2CrdGoeJMcKg"
    healthcheck:
      test:
        [
          "CMD",
          "bash",
          "-c",
          "kafka-topics --bootstrap-server kafka:29092 --list",
        ]
      interval: 10s
      timeout: 5s
      retries: 5

  # S3-compatible object store used as the execution-result backend.
  minio:
    image: minio/minio:latest
    container_name: minio
    ports:
      - "9000:9000" # minio API
      - "9001:9001" # minio Console
    environment:
      # Dev-only credentials; do not reuse outside local compose.
      MINIO_ROOT_USER: minioadmin
      MINIO_ROOT_PASSWORD: minioadmin123
    volumes:
      - minio_data:/data
    command: server --console-address ":9001" /data
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 10s
      timeout: 5s
      retries: 3

  # One-shot job: creates the MinIO bucket(s), then exits.
  minio-bucket-init:
    image: minio/mc:latest
    depends_on:
      minio:
        condition: service_healthy
    entrypoint: ["/bin/sh", "/init-minio-bucket.sh"]
    volumes:
      - ./init-minio-bucket.sh:/init-minio-bucket.sh
    restart: "no"

  # One-shot job: creates the osprey input/output topics, then exits.
  kafka-topic-creator:
    image: confluentinc/cp-kafka:7.4.0
    depends_on:
      kafka:
        condition: service_healthy
    command: >
      bash -c "
      kafka-topics --bootstrap-server kafka:29092 --create --if-not-exists --topic osprey.actions_input --partitions 3 --replication-factor 1 &&
      kafka-topics --bootstrap-server kafka:29092 --create --if-not-exists --topic osprey.execution_results --partitions 3 --replication-factor 1 &&
      kafka-topics --bootstrap-server kafka:29092 --list
      "

  # Rules-engine worker: consumes actions from Kafka, writes results to
  # Kafka + MinIO, uses the Bigtable emulator for storage.
  osprey_worker:
    container_name: osprey_worker
    build:
      context: .
      dockerfile: osprey_worker/Dockerfile
    depends_on:
      kafka:
        condition: service_healthy
      kafka-topic-creator:
        condition: service_completed_successfully
      bigtable:
        condition: service_healthy
      bigtable_initializer:
        condition: service_completed_successfully
      minio:
        condition: service_healthy
      minio-bucket-init:
        condition: service_completed_successfully
    ports:
      - "5001:5000"
    command: ["osprey-worker"]
    environment:
      - PYTHONPATH=/osprey
      - PORT=5000
      - OSPREY_INPUT_STREAM_SOURCE=kafka
      - OSPREY_STDOUT_OUTPUT_SINK=True
      - OSPREY_KAFKA_BOOTSTRAP_SERVERS=["kafka:29092"]
      - OSPREY_KAFKA_INPUT_STREAM_TOPIC=osprey.actions_input
      # Client ID will default to the machine hostname if it isn't defined
      - OSPREY_KAFKA_INPUT_STREAM_CLIENT_ID=localhost
      - OSPREY_KAFKA_OUTPUT_SINK=True
      - OSPREY_KAFKA_OUTPUT_TOPIC=osprey.execution_results
      - OSPREY_KAFKA_OUTPUT_CLIENT_ID=localhost
      # Datadog telemetry off for local dev.
      - DD_TRACE_ENABLED=False
      - DD_DOGSTATSD_DISABLE=True
      - OSPREY_RULES_SINK_NUM_WORKERS=1
      - BIGTABLE_EMULATOR_HOST=bigtable:8361
      - OSPREY_EXECUTION_RESULT_STORAGE_BACKEND=minio
      - OSPREY_MINIO_ENDPOINT=minio:9000
      - OSPREY_MINIO_ACCESS_KEY=minioadmin
      - OSPREY_MINIO_SECRET_KEY=minioadmin123
      - OSPREY_MINIO_SECURE=false
      - OSPREY_MINIO_EXECUTION_RESULTS_BUCKET=execution-output
      - SNOWFLAKE_API_ENDPOINT=http://snowflake:8080
      - OSPREY_RULES_PATH=./example_rules
    volumes:
      # Bind-mount source and rules for live-reload during development.
      - ./osprey_worker:/osprey/osprey_worker
      - ./osprey_rpc:/osprey/osprey_rpc
      - ./example_rules:/osprey/example_rules
      - ./entrypoint.sh:/osprey/entrypoint.sh

  # Flask API backing the UI; shares the worker image/Dockerfile.
  osprey_ui_api:
    container_name: osprey_ui_api
    build:
      context: .
      dockerfile: osprey_worker/Dockerfile
    depends_on:
      - osprey_worker
      - druid-broker
      - postgres
      - snowflake
      - bigtable
      - bigtable_initializer
    ports:
      - "5004:5004"
    command: ["osprey-ui-api"]
    environment:
      - PYTHONPATH=/osprey
      - PORT=5004
      - DEBUG=true
      - FLASK_DEBUG=1
      - FLASK_ENV=development
      - DRUID_URL=http://druid-broker:8082
      - POSTGRES_HOSTS={"osprey_db":"postgresql://osprey:FoolishPassword@postgres:5432/osprey"}
      - DD_TRACE_ENABLED=False
      - DD_DOGSTATSD_DISABLE=True
      - OSPREY_RULES_PATH=/osprey/example_rules
      - OSPREY_DISABLE_VALIDATION_EXPORTER=true
      - BIGTABLE_EMULATOR_HOST=bigtable:8361
      - SNOWFLAKE_API_ENDPOINT=http://snowflake:8080
      - SNOWFLAKE_EPOCH=1420070400000
    volumes:
      - ./osprey_worker:/osprey/osprey_worker
      - ./osprey_rpc:/osprey/osprey_rpc
      - ./example_rules:/osprey/example_rules

  # React dev server for the UI.
  osprey_ui:
    container_name: osprey_ui
    build:
      context: .
      dockerfile: osprey_ui/Dockerfile
    depends_on:
      - osprey_ui_api
    ports:
      - "5002:5002"
    environment:
      - NODE_ENV=development
      - REACT_APP_API_BASE_URL=http://osprey_ui_api:5004
    volumes:
      - ./osprey_ui:/app
      # Anonymous volume so the container's node_modules is not shadowed
      # by the host bind mount above.
      - /app/node_modules

  # Snowflake-style ID generation service.
  snowflake:
    container_name: snowflake_id_worker
    image: ghcr.io/ayubun/snowflake-id-worker:0
    ports:
      - "8080:8080"
    environment:
      - WORKER_ID=0
      - DATA_CENTER_ID=0
      - EPOCH=1420070400000
    restart: unless-stopped

  # Google Cloud Bigtable emulator (from the cloud-sdk image).
  bigtable:
    container_name: bigtable_emulator
    image: gcr.io/google.com/cloudsdktool/cloud-sdk:latest
    ports:
      - "8361:8361"
    command: >
      bash -c "
      gcloud beta emulators bigtable start --host-port=0.0.0.0:8361 --project=osprey-dev
      "
    healthcheck:
      # The emulator has no HTTP health endpoint; check the process instead.
      test: ["CMD", "bash", "-c", "pgrep -f cbtemulator > /dev/null || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
    restart: unless-stopped

  # One-shot job: creates Bigtable tables in the emulator, then exits.
  bigtable_initializer:
    container_name: bigtable_initializer
    image: gcr.io/google.com/cloudsdktool/cloud-sdk:latest
    depends_on:
      bigtable:
        condition: service_healthy
    volumes:
      - ./init-bigtable.sh:/init-bigtable.sh
    command: ["/bin/bash", "/init-bigtable.sh"]

  # Optional test data generator - run with:
  # docker compose --profile test_data up kafka_test_data_producer -d
  kafka_test_data_producer:
    image: confluentinc/cp-kafka:7.4.0
    container_name: kafka_test_data
    depends_on:
      kafka:
        condition: service_healthy
      kafka-topic-creator:
        condition: service_completed_successfully
    profiles:
      - test_data
    environment:
      KAFKA_TOPIC: "osprey.actions_input"
      KAFKA_BROKER: "kafka:29092"
    volumes:
      - ./example_data:/osprey/example_data
    entrypoint:
      - /bin/bash
    command: ["/osprey/example_data/generate_test_data.sh"]

  # Postgres: used by the UI API and as Druid's metadata store.
  postgres:
    container_name: postgres
    image: postgres:latest
    ports:
      - "5432:5432"
    volumes:
      - metadata_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=FoolishPassword
      - POSTGRES_USER=osprey
      - POSTGRES_DB=osprey

  # DRUID, HERE BE DRAGONS
  # Need 3.5 or later for container nodes
  druid-zookeeper:
    container_name: druid-zookeeper
    image: zookeeper:3.5.10
    ports:
      - "2181:2181"
    environment:
      - ZOO_MY_ID=1

  druid-coordinator:
    image: apache/druid:34.0.0
    container_name: druid-coordinator
    volumes:
      - druid_shared:/opt/shared
      - coordinator_var:/opt/druid/var
    depends_on:
      - druid-zookeeper
      - postgres
    ports:
      - "8081:8081"
    command:
      - coordinator
    env_file:
      - druid/environment

  druid-broker:
    image: apache/druid:34.0.0
    container_name: druid-broker
    volumes:
      - broker_var:/opt/druid/var
    depends_on:
      - druid-zookeeper
      - postgres
      - druid-coordinator
    ports:
      - "8082:8082"
    command:
      - broker
    env_file:
      - druid/environment

  druid-historical:
    image: apache/druid:34.0.0
    container_name: druid-historical
    volumes:
      - druid_shared:/opt/shared
      - historical_var:/opt/druid/var
    depends_on:
      - druid-zookeeper
      - postgres
      - druid-coordinator
    ports:
      - "8083:8083"
    command:
      - historical
    env_file:
      - druid/environment

  druid-middlemanager:
    image: apache/druid:34.0.0
    container_name: druid-middlemanager
    volumes:
      - druid_shared:/opt/shared
      - middle_var:/opt/druid/var
    depends_on:
      - druid-zookeeper
      - postgres
      - druid-coordinator
    ports:
      - "8091:8091"
      # Range reserved for middleManager worker task peons.
      - "8100-8105:8100-8105"
    command:
      - middleManager
    env_file:
      - druid/environment

  druid-router:
    image: apache/druid:34.0.0
    container_name: druid-router
    volumes:
      - router_var:/opt/druid/var
    depends_on:
      - druid-zookeeper
      - postgres
      - druid-coordinator
    ports:
      - "8888:8888"
    command:
      - router
    env_file:
      - druid/environment

  # One-shot job: POSTs ingestion specs to the coordinator, then exits.
  druid-spec-submitter:
    image: curlimages/curl:latest
    depends_on:
      - druid-coordinator
    volumes:
      - ./druid/specs:/specs
    command: ["/bin/sh", "/specs/submit-specs.sh"]
    restart: "no"