Mirror of https://github.com/roostorg/coop
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

at 3f8c425df5edf1886efb0d7dd4eccb3c63279ce2 244 lines 6.6 kB view raw
---
# Docker Compose stack for local development and CI: datastores (postgres,
# scylla, clickhouse, redis), messaging (zookeeper, kafka, schema registry),
# observability (jaeger, otel-collector), migration/teardown one-shot jobs,
# and the test/lint runners.
services:
  redis:
    # NOTE(review): unpinned image tag — consider pinning (e.g. redis:7)
    # for reproducible CI runs.
    image: redis
    volumes:
      - redis_data:/data
    ports:
      - '6379:6379'

  # Runs all migrations from scratch, after clearing the database(s).
  migrations:
    image: node:24-bullseye-slim
    # The ';' after 'set -e' is required: YAML folds the scalar's lines with
    # spaces, so without it bash would parse the following words ('npm i') as
    # positional arguments of `set` and the install would never run.
    command: >-
      bash -c 'set -e;
      npm i && ( [ "$CI" = "true" ] && npm run db:clean -- --env staging || true )
      && for db in api-server-pg scylla clickhouse; do npm run db:create -- --db "$$db" --env staging; npm run db:update -- --db "$$db" --env staging; done'
    working_dir: /src
    env_file: ./.env.githubci
    environment:
      # when running migrations locally, you must set these to target your
      # personal db, as in:
      #
      # docker compose run \
      #   -e SNOWFLAKE_ROLE=ACCOUNTADMIN \
      #   -e SNOWFLAKE_USERNAME=YOUR_USERNAME \
      #   -e SNOWFLAKE_PASSWORD='YOURPASSWORD' \
      #   -e SNOWFLAKE_DB_NAME=YOURDB \
      #   migrations
      - MIGRATOR_DB_NAME
      - CLICKHOUSE_DATABASE
      - NPM_TOKEN
    volumes:
      - .:/src
    depends_on:
      postgres:
        condition: service_healthy
      scylla:
        condition: service_healthy
      clickhouse:
        condition: service_healthy

  # One-shot teardown job; counterpart to `migrations`.
  drop_dbs:
    image: node:24-bullseye-slim
    command: bash -c 'npm i && npm run db:drop -- --env staging'
    working_dir: /src
    env_file: ./.env.githubci
    environment:
      # when running migrations locally, you must set these to target your
      # personal db, as in:
      #
      # docker compose run \
      #   -e SNOWFLAKE_ROLE=ACCOUNTADMIN \
      #   -e SNOWFLAKE_USERNAME=YOUR_USERNAME \
      #   -e SNOWFLAKE_PASSWORD='YOURPASSWORD' \
      #   -e SNOWFLAKE_DB_NAME=YOURDB \
      #   drop_dbs
      - CLICKHOUSE_DATABASE
      - MIGRATOR_DB_NAME
      - NPM_TOKEN
    volumes:
      - .:/src
    depends_on:
      - postgres

  postgres:
    image: ankane/pgvector:v0.5.1
    # image: postgres:15.3
    volumes:
      - pg_data:/var/lib/postgresql/data
    ports:
      - '5432:5432'
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'postgres']
      interval: 2s
      start_period: 2s
    environment:
      POSTGRES_PASSWORD: postgres123
      POSTGRES_USER: postgres
      POSTGRES_DB: postgres

  hma:
    build:
      context: ./hma
      dockerfile: Dockerfile
    environment:
      - POSTGRES_PASSWORD=postgres123
      - POSTGRES_USER=postgres
      - POSTGRES_DB=postgres
      - POSTGRES_HOST=postgres
    ports:
      - '5000:5000'
    depends_on:
      - postgres

  scylla:
    image: scylladb/scylla:5.2
    volumes:
      - scylla_data:/var/lib/scylla
    ports:
      - '9042:9042'
    healthcheck:
      test: ['CMD-SHELL', 'nodetool status || exit 1']
      interval: 5s
      timeout: 5s
      retries: 28
      start_period: 35s

  zookeeper:
    image: zookeeper:3.7.0
    container_name: zookeeper
    ports:
      - '22181:2181'
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000

  kafka:
    image: confluentinc/cp-kafka:8.1.0-1-ubi9
    container_name: kafka
    depends_on:
      - zookeeper
    ports:
      - '29092:29092'
    # https://github.com/confluentinc/kafka-images/issues/127#issuecomment-1152703071
    user: root
    environment:
      KAFKA_BROKER_ID: 1
      # NOTE(review): both ZooKeeper wiring (KAFKA_ZOOKEEPER_CONNECT) and
      # KRaft settings (KAFKA_PROCESS_ROLES / KAFKA_CONTROLLER_QUORUM_VOTERS)
      # are configured here; cp-kafka 8.x images are KRaft-based, so the
      # ZooKeeper setting (and the zookeeper dependency) is presumably
      # unused — confirm and simplify.
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_LISTENERS: PLAINTEXT://kafka:9092,CONTROLLER://kafka:9093,PLAINTEXT_HOST://0.0.0.0:29092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_PROCESS_ROLES: broker,controller
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
      KAFKA_CONTROLLER_QUORUM_VOTERS: 1@kafka:9093

  schema_registry:
    # NOTE(review): unpinned image tag — consider pinning for reproducibility.
    image: confluentinc/cp-schema-registry
    container_name: schema-registry
    depends_on:
      - kafka
    ports:
      - '8081:8081'
    environment:
      # NOTE(review): KAFKASTORE_CONNECTION_URL (ZooKeeper) is superseded by
      # KAFKASTORE_BOOTSTRAP_SERVERS below — presumably ignored; verify and
      # remove.
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper:2181
      SCHEMA_REGISTRY_HOST_NAME: schema_registry
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: kafka:9092
    volumes:
      - schema_registry_data:/var/lib/schema-registry

  # Runs the api server's tests
  test:
    build:
      context: .
      dockerfile: Dockerfile
      target: server_base
      args:
        - NPM_TOKEN
        - OMIT_SNOWFLAKE
    command: bash -c 'npm run test:ci'
    working_dir: /app
    volumes:
      - ./server/reports:/app/reports
    env_file: ./.env.githubci
    depends_on:
      migrations:
        condition: service_completed_successfully
      redis:
        condition: service_started
      kafka:
        condition: service_started
      schema_registry:
        condition: service_started

  lint:
    build:
      context: .
      dockerfile: Dockerfile
      target: server_base
      args:
        - NPM_TOKEN
    command: npm run lint

  lint-client:
    build:
      context: client
      target: client_base
      args:
        - NPM_TOKEN
    command: npm run lint
    volumes:
      - ./client/eslint:/app/eslint
      - ./client/.eslintrc.cjs:/app/.eslintrc.cjs

  jaeger:
    image: jaegertracing/all-in-one:latest
    ports:
      - '16686:16686'
      # Container-only exposures (no fixed host port): collector HTTP / gRPC.
      - '14268'
      - '14250'
    environment:
      - LOG_LEVEL=info

  otel-collector:
    # We have to pin to this version of the collector, because versions beyond
    # this do not support the Jaeger exporter. See here for details:
    # https://github.com/open-telemetry/opentelemetry-specification/pull/2858
    image: otel/opentelemetry-collector-contrib:0.71.0
    volumes:
      - ./otel-collector.yaml:/etc/otel-collector.yaml
    command: ['--config=/etc/otel-collector.yaml']
    ports:
      - '1888:1888' # pprof extension
      - '13133:13133' # health_check extension
      - '4317:4317' # OTLP gRPC receiver
      # NOTE(review): host 55670 maps to container 55679 — looks deliberate
      # (avoids clashing with a local zpages), but confirm it isn't a typo.
      - '55670:55679' # zpages extension
    depends_on:
      - jaeger

  clickhouse:
    image: clickhouse/clickhouse-server:24.3
    container_name: clickhouse
    ports:
      - '8123:8123' # HTTP interface
      - '9000:9000' # Native TCP interface
    volumes:
      - clickhouse_data:/var/lib/clickhouse
    environment:
      CLICKHOUSE_DB: analytics
      CLICKHOUSE_USER: default
      CLICKHOUSE_PASSWORD: 'clickhouse'
    healthcheck:
      test: ['CMD-SHELL', 'clickhouse-client --host localhost --query "SELECT 1"']
      interval: 5s
      timeout: 3s
      retries: 5

volumes:
  pg_data:
  scylla_data:
  redis_data:
  # NOTE(review): kafka_data is declared but no service mounts it — presumably
  # leftover; confirm and remove.
  kafka_data:
  schema_registry_data:
  clickhouse_data: