Mirror of https://github.com/roostorg/osprey
1 fork

Configure Feed

Select the types of activity you want to include in your feed.

renamed kafka to osprey-kafka (#119)

Co-authored-by: Juliet Shen <juliet@roost.tools>

Authored by juliet (Juliet Shen) and committed by GitHub
5210214d 106145d7

+30 -30
+3 -3
docker-compose.test.yaml
··· 43 43 context: . 44 44 dockerfile: osprey_worker/Dockerfile 45 45 depends_on: 46 - kafka: 46 + osprey-kafka: 47 47 condition: service_healthy 48 - kafka-topic-creator: 48 + osprey-kafka-topic-creator: 49 49 condition: service_completed_successfully 50 50 bigtable: 51 51 condition: service_healthy ··· 67 67 - PYTHONPATH=/osprey 68 68 - OSPREY_INPUT_STREAM_SOURCE=kafka 69 69 - OSPREY_STDOUT_OUTPUT_SINK=True 70 - - OSPREY_KAFKA_BOOTSTRAP_SERVERS=["kafka:29092"] 70 + - OSPREY_KAFKA_BOOTSTRAP_SERVERS=["osprey-kafka:29092"] 71 71 - OSPREY_KAFKA_INPUT_STREAM_TOPIC=osprey.actions_input 72 72 - OSPREY_KAFKA_INPUT_STREAM_CLIENT_ID=localhost 73 73 - OSPREY_KAFKA_OUTPUT_SINK=True
+22 -22
docker-compose.yaml
··· 10 10 minio_data: {} 11 11 12 12 services: 13 - kafka: 13 + osprey-kafka: 14 14 image: confluentinc/cp-kafka:7.4.0 15 - hostname: kafka 16 - container_name: kafka 15 + hostname: osprey-kafka 16 + container_name: osprey-kafka 17 17 ports: 18 18 - "9092:9092" 19 19 environment: 20 20 KAFKA_NODE_ID: 1 21 21 KAFKA_PROCESS_ROLES: "broker,controller" 22 - KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka:29093" 22 + KAFKA_CONTROLLER_QUORUM_VOTERS: "1@osprey-kafka:29093" 23 23 KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER" 24 24 KAFKA_INTER_BROKER_LISTENER_NAME: "INTERNAL" 25 - KAFKA_LISTENERS: "INTERNAL://kafka:29092,EXTERNAL://0.0.0.0:9092,CONTROLLER://kafka:29093" 26 - KAFKA_ADVERTISED_LISTENERS: "INTERNAL://kafka:29092,EXTERNAL://localhost:9092" 25 + KAFKA_LISTENERS: "INTERNAL://osprey-kafka:29092,EXTERNAL://0.0.0.0:9092,CONTROLLER://osprey-kafka:29093" 26 + KAFKA_ADVERTISED_LISTENERS: "INTERNAL://osprey-kafka:29092,EXTERNAL://localhost:9092" 27 27 KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT" 28 28 KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 29 29 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 ··· 36 36 "CMD", 37 37 "bash", 38 38 "-c", 39 - "kafka-topics --bootstrap-server kafka:29092 --list", 39 + "kafka-topics --bootstrap-server osprey-kafka:29092 --list", 40 40 ] 41 41 interval: 10s 42 42 timeout: 5s ··· 71 71 - ./init-minio-bucket.sh:/init-minio-bucket.sh 72 72 restart: "no" 73 73 74 - kafka-topic-creator: 74 + osprey-kafka-topic-creator: 75 75 image: confluentinc/cp-kafka:7.4.0 76 76 depends_on: 77 - kafka: 77 + osprey-kafka: 78 78 condition: service_healthy 79 79 command: > 80 80 bash -c " 81 - kafka-topics --bootstrap-server kafka:29092 --create --if-not-exists --topic osprey.actions_input --partitions 3 --replication-factor 1 && 82 - kafka-topics --bootstrap-server kafka:29092 --create --if-not-exists --topic osprey.execution_results --partitions 3 --replication-factor 1 && 83 - kafka-topics --bootstrap-server kafka:29092 --list
81 + kafka-topics --bootstrap-server osprey-kafka:29092 --create --if-not-exists --topic osprey.actions_input --partitions 3 --replication-factor 1 && 82 + kafka-topics --bootstrap-server osprey-kafka:29092 --create --if-not-exists --topic osprey.execution_results --partitions 3 --replication-factor 1 && 83 + kafka-topics --bootstrap-server osprey-kafka:29092 --list 84 84 " 85 85 86 86 osprey-worker: ··· 90 90 context: . 91 91 dockerfile: osprey_worker/Dockerfile 92 92 depends_on: 93 - kafka: 93 + osprey-kafka: 94 94 condition: service_healthy 95 - kafka-topic-creator: 95 + osprey-kafka-topic-creator: 96 96 condition: service_completed_successfully 97 97 bigtable: 98 98 condition: service_healthy ··· 111 111 - POSTGRES_HOSTS={"osprey_db":"postgresql://osprey:FoolishPassword@postgres:5432/osprey"} 112 112 - OSPREY_INPUT_STREAM_SOURCE=kafka 113 113 - OSPREY_STDOUT_OUTPUT_SINK=True 114 - - OSPREY_KAFKA_BOOTSTRAP_SERVERS=["kafka:29092"] 114 + - OSPREY_KAFKA_BOOTSTRAP_SERVERS=["osprey-kafka:29092"] 115 115 - OSPREY_KAFKA_INPUT_STREAM_TOPIC=osprey.actions_input 116 116 # Client ID will default to the machine hostname if it isn't defined 117 117 - OSPREY_KAFKA_INPUT_STREAM_CLIENT_ID=localhost ··· 229 229 command: ["/bin/bash", "/init-bigtable.sh"] 230 230 231 231 # Optional test data generator - run with: 232 - # docker compose --profile test_data up kafka-test-data-producer -d 233 - kafka-test-data-producer: 232 + # docker compose --profile test_data up osprey-kafka-test-data-producer -d 233 + osprey-kafka-test-data-producer: 234 234 image: confluentinc/cp-kafka:7.4.0 235 - hostname: kafka-test-data-producer 236 - container_name: kafka-test-data-producer 235 + hostname: osprey-kafka-test-data-producer 236 + container_name: osprey-kafka-test-data-producer 237 237 depends_on: 238 - kafka: 238 + osprey-kafka: 239 239 condition: service_healthy 240 - kafka-topic-creator: 240 + osprey-kafka-topic-creator: 241 241 condition: service_completed_successfully 
242 242 profiles: 243 243 - test_data 244 244 - test-data 245 245 environment: 246 246 KAFKA_TOPIC: "osprey.actions_input" 247 - KAFKA_BROKER: "kafka:29092" 247 + KAFKA_BROKER: "osprey-kafka:29092" 248 248 volumes: 249 249 - ./example_data:/osprey/example_data 250 250 entrypoint:
+5 -5
example_docker_compose/run_osprey_with_coordinator/docker-compose.coordinator.yaml
··· 6 6 # Override worker to connect to coordinator instead of Kafka directly 7 7 osprey-worker: 8 8 depends_on: 9 - kafka: 9 + osprey-kafka: 10 10 condition: service_healthy 11 - kafka-topic-creator: 11 + osprey-kafka-topic-creator: 12 12 condition: service_completed_successfully 13 13 bigtable: 14 14 condition: service_healthy ··· 47 47 - OSPREY_COORDINATOR_BIDI_STREAM_PORT=19950 48 48 - OSPREY_COORDINATOR_SYNC_ACTION_PORT=19951 49 49 - POD_IP=osprey-coordinator 50 - - OSPREY_KAFKA_BOOTSTRAP_SERVERS=kafka:29092 50 + - OSPREY_KAFKA_BOOTSTRAP_SERVERS=osprey-kafka:29092 51 51 - OSPREY_KAFKA_INPUT_STREAM_TOPIC=osprey.actions_input 52 52 - OSPREY_KAFKA_GROUP_ID=osprey_coordinator_group 53 53 - MAX_TIME_TO_SEND_TO_ASYNC_QUEUE_MS=500 ··· 57 57 condition: service_healthy 58 58 snowflake-id-worker: 59 59 condition: service_started 60 - kafka: 60 + osprey-kafka: 61 61 condition: service_healthy 62 - kafka-topic-creator: 62 + osprey-kafka-topic-creator: 63 63 condition: service_completed_successfully 64 64 65 65 # Add etcd service (required by coordinator)