this repo has no description
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

at main 2098 lines 94 kB view raw
1""" 2Copyright 2020 The Rook Authors. All rights reserved. 3 4Licensed under the Apache License, Version 2.0 (the "License"); 5you may not use this file except in compliance with the License. 6You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10Unless required by applicable law or agreed to in writing, software 11distributed under the License is distributed on an "AS IS" BASIS, 12WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13See the License for the specific language governing permissions and 14limitations under the License. 15""" 16 17import errno 18import sys 19import json 20import argparse 21import re 22import subprocess 23import hmac 24from hashlib import sha1 as sha 25from os import linesep as LINESEP 26from os import path 27from email.utils import formatdate 28import requests 29from requests.auth import AuthBase 30 31py3k = False 32if sys.version_info.major >= 3: 33 py3k = True 34 import urllib.parse 35 from ipaddress import ip_address, IPv4Address 36 37ModuleNotFoundError = ImportError 38 39try: 40 import rados 41except ModuleNotFoundError as noModErr: 42 print(f"Error: {noModErr}\nExiting the script...") 43 sys.exit(1) 44 45try: 46 import rbd 47except ModuleNotFoundError as noModErr: 48 print(f"Error: {noModErr}\nExiting the script...") 49 sys.exit(1) 50 51try: 52 # for 2.7.x 53 from StringIO import StringIO 54except ModuleNotFoundError: 55 # for 3.x 56 from io import StringIO 57 58try: 59 # for 2.7.x 60 from urlparse import urlparse 61 from urllib import urlencode as urlencode 62except ModuleNotFoundError: 63 # for 3.x 64 from urllib.parse import urlparse 65 from urllib.parse import urlencode as urlencode 66 67try: 68 from base64 import encodestring 69except: 70 from base64 import encodebytes as encodestring 71 72 73class ExecutionFailureException(Exception): 74 pass 75 76 77################################################ 78################## DummyRados ################## 79################################################ 80# this is mainly for testing and could be used where 'rados' is not available 81 82 83class DummyRados(object): 84 def __init__(self): 85 self.return_val = 0 86 self.err_message = "" 87 self.state = "connected" 88 self.cmd_output_map = {} 89 self.cmd_names = {} 90 self._init_cmd_output_map() 91 self.dummy_host_ip_map = {} 92 93 def _init_cmd_output_map(self): 94 json_file_name = "test-data/ceph-status-out" 95 script_dir = path.abspath(path.dirname(__file__)) 96 ceph_status_str = "" 97 with open( 98 path.join(script_dir, json_file_name), mode="r", encoding="UTF-8" 99 ) as json_file: 100 ceph_status_str = json_file.read() 101 self.cmd_names["fs ls"] = """{"format": "json", "prefix": "fs ls"}""" 102 self.cmd_names["quorum_status"] = ( 103 """{"format": "json", "prefix": "quorum_status"}""" 104 ) 105 self.cmd_names["mgr services"] = ( 106 """{"format": "json", "prefix": "mgr services"}""" 107 ) 108 # all the commands and their output 109 self.cmd_output_map[self.cmd_names["fs ls"]] = ( 110 """[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":2,"data_pool_ids":[3],"data_pools":["myfs-replicated"]}]""" 111 ) 112 self.cmd_output_map[self.cmd_names["quorum_status"]] = ( 113 """{"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_leader_name":"a","quorum_age":14385,"features":{"quorum_con":"4540138292836696063","quorum_mon":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"]},"monmap":{"epoch":1,"fsid":"af4e1673-0b72-402d-990a-22d2919d0f1c","modified":"2020-05-07T03:36:39.918035Z","created":"2020-05-07T03:36:39.918035Z","min_mon_release":15,"min_mon_release_name":"octopus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"10.110.205.174:3300","nonce":0},{"type":"v1","addr":"10.110.205.174:6789","nonce":0}]},"addr":"10.110.205.174:6789/0","public_addr":"10.110.205.174:6789/0","priority":0,"weight":0}]}}""" 114 ) 115 self.cmd_output_map[self.cmd_names["mgr services"]] = ( 116 """{"dashboard":"https://ceph-dashboard:8443/","prometheus":"http://ceph-dashboard-db:9283/"}""" 117 ) 118 self.cmd_output_map[ 119 """{"caps": ["mon", "allow r, allow command quorum_status", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}""" 120 ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]""" 121 self.cmd_output_map[ 122 """{"caps": ["mon", "profile rbd, allow command 'osd blocklist'", "osd", "profile rbd"], "entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get-or-create"}""" 123 ] = """[{"entity":"client.csi-rbd-node","key":"AQBOgrNeHbK1AxAAubYBeV8S1U/GPzq5SVeq6g==","caps":{"mon":"profile rbd, allow command 'osd blocklist'","osd":"profile rbd"}}]""" 124 self.cmd_output_map[ 125 """{"caps": ["mon", "profile rbd, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "profile rbd"], "entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get-or-create"}""" 126 ] = """[{"entity":"client.csi-rbd-provisioner","key":"AQBNgrNe1geyKxAA8ekViRdE+hss5OweYBkwNg==","caps":{"mgr":"allow rw","mon":"profile rbd, allow command 'osd blocklist'","osd":"profile rbd"}}]""" 127 self.cmd_output_map[ 128 """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs *=*", "mds", "allow rw"], "entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get-or-create"}""" 129 ] = """[{"entity":"client.csi-cephfs-node","key":"AQBOgrNeENunKxAAPCmgE7R6G8DcXnaJ1F32qg==","caps":{"mds":"allow rw","mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs *=*"}}]""" 130 self.cmd_output_map[ 131 """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get-or-create"}""" 132 ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=*"}}]""" 133 self.cmd_output_map[ 134 """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner-openshift-storage", "format": "json", "prefix": "auth get-or-create"}""" 135 ] = """[{"entity":"client.csi-cephfs-provisioner-openshift-storage","key":"BQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=*"}}]""" 136 self.cmd_output_map[ 137 """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=myfs"], "entity": "client.csi-cephfs-provisioner-openshift-storage-myfs", "format": "json", "prefix": "auth get-or-create"}""" 138 ] = """[{"entity":"client.csi-cephfs-provisioner-openshift-storage-myfs","key":"CQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=myfs"}}]""" 139 self.cmd_output_map[ 140 """{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}""" 141 ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]""" 142 self.cmd_output_map[ 143 """{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth caps"}""" 144 ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSRKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]""" 145 self.cmd_output_map["""{"format": "json", "prefix": "mgr services"}"""] = ( 146 """{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}""" 147 ) 148 self.cmd_output_map[ 149 """{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}""" 150 ] = """{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}""" 151 self.cmd_output_map[ 152 """{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}""" 153 ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]""" 154 self.cmd_output_map[ 155 """{"entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get"}""" 156 ] = """[]""" 157 self.cmd_output_map[ 158 """{"entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get"}""" 159 ] = """[]""" 160 self.cmd_output_map[ 161 """{"entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get"}""" 162 ] = """[]""" 163 self.cmd_output_map[ 164 """{"entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get"}""" 165 ] = """[]""" 166 self.cmd_output_map[ 167 """{"entity": "client.csi-cephfs-provisioner-openshift-storage", "format": "json", "prefix": "auth get"}""" 168 ] = """[]""" 169 self.cmd_output_map[ 170 """{"entity": "client.csi-cephfs-provisioner-openshift-storage-myfs", "format": "json", "prefix": "auth get"}""" 171 ] = """[]""" 172 self.cmd_output_map[ 173 """{"entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get"}""" 174 ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r", "mgr":"allow rw", "osd":"allow rw tag cephfs metadata=*"}}]""" 175 self.cmd_output_map[ 176 """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth caps"}""" 177 ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command 'osd blocklist'", "mgr":"allow rw", "osd":"allow rw tag cephfs metadata=*"}}]""" 178 self.cmd_output_map['{"format": "json", "prefix": "status"}'] = ceph_status_str 179 180 def shutdown(self): 181 pass 182 183 def get_fsid(self): 184 return "af4e1673-0b72-402d-990a-22d2919d0f1c" 185 186 def conf_read_file(self): 187 pass 188 189 def connect(self): 190 pass 191 192 def pool_exists(self, pool_name): 193 return True 194 195 def mon_command(self, cmd, out): 196 json_cmd = json.loads(cmd) 197 json_cmd_str = json.dumps(json_cmd, sort_keys=True) 198 cmd_output = self.cmd_output_map[json_cmd_str] 199 return self.return_val, cmd_output, str(self.err_message.encode("utf-8")) 200 201 def _convert_hostname_to_ip(self, host_name): 202 ip_reg_x = re.compile(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}") 203 # if provided host is directly an IP address, return the same 204 if ip_reg_x.match(host_name): 205 return host_name 206 import random 207 208 host_ip = self.dummy_host_ip_map.get(host_name, "") 209 if not host_ip: 210 host_ip = f"172.9.{random.randint(0, 254)}.{random.randint(0, 254)}" 211 self.dummy_host_ip_map[host_name] = host_ip 212 del random 213 return host_ip 214 215 @classmethod 216 def Rados(conffile=None): 217 return DummyRados() 218 219 220class S3Auth(AuthBase): 221 """Attaches AWS Authentication to the given Request object.""" 222 223 service_base_url = "s3.amazonaws.com" 224 225 def __init__(self, access_key, secret_key, service_url=None): 226 if service_url: 227 self.service_base_url = service_url 228 self.access_key = str(access_key) 229 self.secret_key = str(secret_key) 230 231 def __call__(self, r): 232 # Create date header if it is not created yet. 233 if "date" not in r.headers and "x-amz-date" not in r.headers: 234 r.headers["date"] = formatdate(timeval=None, localtime=False, usegmt=True) 235 signature = self.get_signature(r) 236 if py3k: 237 signature = signature.decode("utf-8") 238 r.headers["Authorization"] = f"AWS {self.access_key}:{signature}" 239 return r 240 241 def get_signature(self, r): 242 canonical_string = self.get_canonical_string(r.url, r.headers, r.method) 243 if py3k: 244 key = self.secret_key.encode("utf-8") 245 msg = canonical_string.encode("utf-8") 246 else: 247 key = self.secret_key 248 msg = canonical_string 249 h = hmac.new(key, msg, digestmod=sha) 250 return encodestring(h.digest()).strip() 251 252 def get_canonical_string(self, url, headers, method): 253 parsedurl = urlparse(url) 254 objectkey = parsedurl.path[1:] 255 256 bucket = parsedurl.netloc[: -len(self.service_base_url)] 257 if len(bucket) > 1: 258 # remove last dot 259 bucket = bucket[:-1] 260 261 interesting_headers = {"content-md5": "", "content-type": "", "date": ""} 262 for key in headers: 263 lk = key.lower() 264 try: 265 lk = lk.decode("utf-8") 266 except: 267 pass 268 if headers[key] and ( 269 lk in interesting_headers.keys() or lk.startswith("x-amz-") 270 ): 271 interesting_headers[lk] = headers[key].strip() 272 273 # If x-amz-date is used it supersedes the date header. 274 if not py3k: 275 if "x-amz-date" in interesting_headers: 276 interesting_headers["date"] = "" 277 else: 278 if "x-amz-date" in interesting_headers: 279 interesting_headers["date"] = "" 280 281 buf = f"{method}\n" 282 for key in sorted(interesting_headers.keys()): 283 val = interesting_headers[key] 284 if key.startswith("x-amz-"): 285 buf += f"{key}:{val}\n" 286 else: 287 buf += f"{val}\n" 288 289 # append the bucket if it exists 290 if bucket != "": 291 buf += f"/{bucket}" 292 293 # add the objectkey. even if it doesn't exist, add the slash 294 buf += f"/{objectkey}" 295 296 return buf 297 298 299class RadosJSON: 300 EXTERNAL_USER_NAME = "client.healthchecker" 301 EXTERNAL_RGW_ADMIN_OPS_USER_NAME = "rgw-admin-ops-user" 302 EMPTY_OUTPUT_LIST = "Empty output list" 303 DEFAULT_RGW_POOL_PREFIX = "default" 304 DEFAULT_MONITORING_ENDPOINT_PORT = "9283" 305 306 @classmethod 307 def gen_arg_parser(cls, args_to_parse=None): 308 argP = argparse.ArgumentParser() 309 310 common_group = argP.add_argument_group("common") 311 common_group.add_argument("--verbose", "-v", action="store_true", default=False) 312 common_group.add_argument( 313 "--ceph-conf", "-c", help="Provide a ceph conf file.", type=str 314 ) 315 common_group.add_argument( 316 "--keyring", "-k", help="Path to ceph keyring file.", type=str 317 ) 318 common_group.add_argument( 319 "--run-as-user", 320 "-u", 321 default="", 322 type=str, 323 help="Provides a user name to check the cluster's health status, must be prefixed by 'client.'", 324 ) 325 common_group.add_argument( 326 "--cluster-name", 327 default="", 328 help="Kubernetes cluster name(legacy flag), Note: Either use this or --k8s-cluster-name", 329 ) 330 common_group.add_argument( 331 "--k8s-cluster-name", default="", help="Kubernetes cluster name" 332 ) 333 common_group.add_argument( 334 "--namespace", 335 default="", 336 help="Namespace where CephCluster is running", 337 ) 338 common_group.add_argument( 339 "--rgw-pool-prefix", default="", help="RGW Pool prefix" 340 ) 341 common_group.add_argument( 342 "--restricted-auth-permission", 343 default=False, 344 help="Restrict cephCSIKeyrings auth permissions to specific pools, cluster." 345 + "Mandatory flags that need to be set are --rbd-data-pool-name, and --k8s-cluster-name." 346 + "--cephfs-filesystem-name flag can also be passed in case of cephfs user restriction, so it can restrict user to particular cephfs filesystem" 347 + "sample run: `python3 /etc/ceph/create-external-cluster-resources.py --cephfs-filesystem-name myfs --rbd-data-pool-name replicapool --k8s-cluster-name rookstorage --restricted-auth-permission true`" 348 + "Note: Restricting the csi-users per pool, and per cluster will require creating new csi-users and new secrets for that csi-users." 349 + "So apply these secrets only to new `Consumer cluster` deployment while using the same `Source cluster`.", 350 ) 351 common_group.add_argument( 352 "--v2-port-enable", 353 action="store_true", 354 default=False, 355 help="Enable v2 mon port(3300) for mons", 356 ) 357 358 output_group = argP.add_argument_group("output") 359 output_group.add_argument( 360 "--format", 361 "-t", 362 choices=["json", "bash"], 363 default="json", 364 help="Provides the output format (json | bash)", 365 ) 366 output_group.add_argument( 367 "--output", 368 "-o", 369 default="", 370 help="Output will be stored into the provided file", 371 ) 372 output_group.add_argument( 373 "--cephfs-filesystem-name", 374 default="", 375 help="Provides the name of the Ceph filesystem", 376 ) 377 output_group.add_argument( 378 "--cephfs-metadata-pool-name", 379 default="", 380 help="Provides the name of the cephfs metadata pool", 381 ) 382 output_group.add_argument( 383 "--cephfs-data-pool-name", 384 default="", 385 help="Provides the name of the cephfs data pool", 386 ) 387 output_group.add_argument( 388 "--rbd-data-pool-name", 389 default="", 390 required=False, 391 help="Provides the name of the RBD datapool", 392 ) 393 output_group.add_argument( 394 "--alias-rbd-data-pool-name", 395 default="", 396 required=False, 397 help="Provides an alias for the RBD data pool name, necessary if a special character is present in the pool name such as a period or underscore", 398 ) 399 output_group.add_argument( 400 "--rgw-endpoint", 401 default="", 402 required=False, 403 help="RADOS Gateway endpoint (in `<IPv4>:<PORT>` or `<[IPv6]>:<PORT>` or `<FQDN>:<PORT>` format)", 404 ) 405 output_group.add_argument( 406 "--rgw-tls-cert-path", 407 default="", 408 required=False, 409 help="RADOS Gateway endpoint TLS certificate", 410 ) 411 output_group.add_argument( 412 "--rgw-skip-tls", 413 required=False, 414 default=False, 415 help="Ignore TLS certification validation when a self-signed certificate is provided (NOT RECOMMENDED", 416 ) 417 output_group.add_argument( 418 "--monitoring-endpoint", 419 default="", 420 required=False, 421 help="Ceph Manager prometheus exporter endpoints (comma separated list of (format `<IPv4>` or `<[IPv6]>` or `<FQDN>`) entries of active and standby mgrs)", 422 ) 423 output_group.add_argument( 424 "--monitoring-endpoint-port", 425 default="", 426 required=False, 427 help="Ceph Manager prometheus exporter port", 428 ) 429 output_group.add_argument( 430 "--skip-monitoring-endpoint", 431 default=False, 432 action="store_true", 433 help="Do not check for a monitoring endpoint for the Ceph cluster", 434 ) 435 output_group.add_argument( 436 "--rbd-metadata-ec-pool-name", 437 default="", 438 required=False, 439 help="Provides the name of erasure coded RBD metadata pool", 440 ) 441 output_group.add_argument( 442 "--dry-run", 443 default=False, 444 action="store_true", 445 help="Dry run prints the executed commands without running them", 446 ) 447 output_group.add_argument( 448 "--rados-namespace", 449 default="", 450 required=False, 451 help="Divides a pool into separate logical namespaces, used for creating RBD PVC in a CephBlockPoolRadosNamespace (should be lower case)", 452 ) 453 output_group.add_argument( 454 "--subvolume-group", 455 default="", 456 required=False, 457 help="provides the name of the subvolume group", 458 ) 459 output_group.add_argument( 460 "--rgw-realm-name", 461 default="", 462 required=False, 463 help="provides the name of the rgw-realm", 464 ) 465 output_group.add_argument( 466 "--rgw-zone-name", 467 default="", 468 required=False, 469 help="provides the name of the rgw-zone", 470 ) 471 output_group.add_argument( 472 "--rgw-zonegroup-name", 473 default="", 474 required=False, 475 help="provides the name of the rgw-zonegroup", 476 ) 477 output_group.add_argument( 478 "--topology-pools", 479 default="", 480 required=False, 481 help="comma-separated list of topology-constrained rbd pools", 482 ) 483 output_group.add_argument( 484 "--topology-failure-domain-label", 485 default="", 486 required=False, 487 help="k8s cluster failure domain label (example: zone, rack, or host) for the topology-pools that match the ceph domain", 488 ) 489 output_group.add_argument( 490 "--topology-failure-domain-values", 491 default="", 492 required=False, 493 help="comma-separated list of the k8s cluster failure domain values corresponding to each of the pools in the `topology-pools` list", 494 ) 495 496 upgrade_group = argP.add_argument_group("upgrade") 497 upgrade_group.add_argument( 498 "--upgrade", 499 action="store_true", 500 default=False, 501 help="Upgrades the cephCSIKeyrings(For example: client.csi-cephfs-provisioner) and client.healthchecker ceph users with new permissions needed for the new cluster version and older permission will still be applied." 502 + "Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --upgrade`, this will upgrade all the default csi users(non-restricted)" 503 + "For restricted users(For example: client.csi-cephfs-provisioner-openshift-storage-myfs), users created using --restricted-auth-permission flag need to pass mandatory flags" 504 + "mandatory flags: '--rbd-data-pool-name, --k8s-cluster-name and --run-as-user' flags while upgrading" 505 + "in case of cephfs users if you have passed --cephfs-filesystem-name flag while creating user then while upgrading it will be mandatory too" 506 + "Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --k8s-cluster-name rookstorage --run-as-user client.csi-rbd-node-rookstorage-replicapool`" 507 + "PS: An existing non-restricted user cannot be converted to a restricted user by upgrading." 508 + "Upgrade flag should only be used to append new permissions to users, it shouldn't be used for changing user already applied permission, for example you shouldn't change in which pool user has access", 509 ) 510 511 if args_to_parse: 512 assert ( 513 type(args_to_parse) == list 514 ), "Argument to 'gen_arg_parser' should be a list" 515 else: 516 args_to_parse = sys.argv[1:] 517 return argP.parse_args(args_to_parse) 518 519 def validate_rbd_metadata_ec_pool_name(self): 520 if self._arg_parser.rbd_metadata_ec_pool_name: 521 rbd_metadata_ec_pool_name = self._arg_parser.rbd_metadata_ec_pool_name 522 rbd_pool_name = self._arg_parser.rbd_data_pool_name 523 524 if rbd_pool_name == "": 525 raise ExecutionFailureException( 526 "Flag '--rbd-data-pool-name' should not be empty" 527 ) 528 529 if rbd_metadata_ec_pool_name == "": 530 raise ExecutionFailureException( 531 "Flag '--rbd-metadata-ec-pool-name' should not be empty" 532 ) 533 534 cmd_json = {"prefix": "osd dump", "format": "json"} 535 ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) 536 if ret_val != 0 or len(json_out) == 0: 537 raise ExecutionFailureException( 538 f"{cmd_json['prefix']} command failed.\n" 539 f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}" 540 ) 541 metadata_pool_exist, pool_exist = False, False 542 543 for key in json_out["pools"]: 544 # if erasure_code_profile is empty and pool name exists then it replica pool 545 if ( 546 key["erasure_code_profile"] == "" 547 and key["pool_name"] == rbd_metadata_ec_pool_name 548 ): 549 metadata_pool_exist = True 550 # if erasure_code_profile is not empty and pool name exists then it is ec pool 551 if key["erasure_code_profile"] and key["pool_name"] == rbd_pool_name: 552 pool_exist = True 553 554 if not metadata_pool_exist: 555 raise ExecutionFailureException( 556 "Provided rbd_ec_metadata_pool name," 557 f" {rbd_metadata_ec_pool_name}, does not exist" 558 ) 559 if not pool_exist: 560 raise ExecutionFailureException( 561 f"Provided rbd_data_pool name, {rbd_pool_name}, does not exist" 562 ) 563 return rbd_metadata_ec_pool_name 564 565 def dry_run(self, msg): 566 if self._arg_parser.dry_run: 567 print("Execute: " + "'" + msg + "'") 568 569 def validate_rgw_endpoint_tls_cert(self): 570 if self._arg_parser.rgw_tls_cert_path: 571 with open(self._arg_parser.rgw_tls_cert_path, encoding="utf8") as f: 572 contents = f.read() 573 return contents.rstrip() 574 575 def _check_conflicting_options(self): 576 if not self._arg_parser.upgrade and not self._arg_parser.rbd_data_pool_name: 577 raise ExecutionFailureException( 578 "Either '--upgrade' or '--rbd-data-pool-name <pool_name>' should be specified" 579 ) 580 581 def _invalid_endpoint(self, endpoint_str): 582 # extract the port by getting the last split on `:` delimiter 583 try: 584 endpoint_str_ip, port = endpoint_str.rsplit(":", 1) 585 except ValueError: 586 raise ExecutionFailureException(f"Not a proper endpoint: {endpoint_str}") 587 588 try: 589 if endpoint_str_ip[0] == "[": 590 endpoint_str_ip = endpoint_str_ip[1 : len(endpoint_str_ip) - 1] 591 ip_type = ( 592 "IPv4" if type(ip_address(endpoint_str_ip)) is IPv4Address else "IPv6" 593 ) 594 except ValueError: 595 ip_type = "FQDN" 596 if not port.isdigit(): 597 raise ExecutionFailureException(f"Port not valid: {port}") 598 intPort = int(port) 599 if intPort < 1 or intPort > 2**16 - 1: 600 raise ExecutionFailureException(f"Out of range port number: {port}") 601 602 return ip_type 603 604 def endpoint_dial(self, endpoint_str, ip_type, timeout=3, cert=None): 605 # if the 'cluster' instance is a dummy one, 606 # don't try to reach out to the endpoint 607 if isinstance(self.cluster, DummyRados): 608 return "", "", "" 609 if ip_type == "IPv6": 610 try: 611 endpoint_str_ip, endpoint_str_port = endpoint_str.rsplit(":", 1) 612 except ValueError: 613 raise ExecutionFailureException( 614 f"Not a proper endpoint: {endpoint_str}" 615 ) 616 if endpoint_str_ip[0] != "[": 617 endpoint_str_ip = "[" + endpoint_str_ip + "]" 618 endpoint_str = ":".join([endpoint_str_ip, endpoint_str_port]) 619 620 protocols = ["http", "https"] 621 response_error = None 622 for prefix in protocols: 623 try: 624 ep = f"{prefix}://{endpoint_str}" 625 verify = None 626 # If verify is set to a path to a directory, 627 # the directory must have been processed using the c_rehash utility supplied with OpenSSL. 628 if prefix == "https" and self._arg_parser.rgw_skip_tls: 629 verify = False 630 r = requests.head(ep, timeout=timeout, verify=False) 631 elif prefix == "https" and cert: 632 verify = cert 633 r = requests.head(ep, timeout=timeout, verify=cert) 634 else: 635 r = requests.head(ep, timeout=timeout) 636 if r.status_code == 200: 637 return prefix, verify, "" 638 except Exception as err: 639 response_error = err 640 continue 641 sys.stderr.write( 642 f"unable to connect to endpoint: {endpoint_str}, failed error: {response_error}" 643 ) 644 return ( 645 "", 646 "", 647 ("-1"), 648 ) 649 650 def __init__(self, arg_list=None): 651 self.out_map = {} 652 self._excluded_keys = set() 653 self._arg_parser = self.gen_arg_parser(args_to_parse=arg_list) 654 self._check_conflicting_options() 655 self.run_as_user = self._arg_parser.run_as_user 656 self.output_file = self._arg_parser.output 657 self.ceph_conf = self._arg_parser.ceph_conf 658 self.ceph_keyring = self._arg_parser.keyring 659 # if user not provided, give a default user 660 if not self.run_as_user and not self._arg_parser.upgrade: 661 self.run_as_user = self.EXTERNAL_USER_NAME 662 if not self._arg_parser.rgw_pool_prefix and not self._arg_parser.upgrade: 663 self._arg_parser.rgw_pool_prefix = self.DEFAULT_RGW_POOL_PREFIX 664 if self.ceph_conf: 665 kwargs = {} 666 if self.ceph_keyring: 667 kwargs["conf"] = {"keyring": self.ceph_keyring} 668 self.cluster = rados.Rados(conffile=self.ceph_conf, **kwargs) 669 else: 670 self.cluster = rados.Rados() 671 self.cluster.conf_read_file() 672 self.cluster.connect() 673 674 def shutdown(self): 675 if self.cluster.state == "connected": 676 self.cluster.shutdown() 677 678 def get_fsid(self): 679 if self._arg_parser.dry_run: 680 return self.dry_run("ceph fsid") 681 return str(self.cluster.get_fsid()) 682 683 def _common_cmd_json_gen(self, cmd_json): 684 cmd = json.dumps(cmd_json, sort_keys=True) 685 ret_val, cmd_out, err_msg = self.cluster.mon_command(cmd, b"") 686 if self._arg_parser.verbose: 687 print(f"Command Input: {cmd}") 688 print( 689 f"Return Val: {ret_val}\nCommand Output: {cmd_out}\n" 690 f"Error Message: {err_msg}\n----------\n" 691 ) 692 json_out = {} 693 # if there is no error (i.e; ret_val is ZERO) and 'cmd_out' is not empty 694 # then convert 'cmd_out' to a json output 695 if ret_val == 0 and cmd_out: 696 json_out = json.loads(cmd_out) 697 return ret_val, json_out, err_msg 698 699 def get_ceph_external_mon_data(self): 700 cmd_json = {"prefix": "quorum_status", "format": "json"} 701 if self._arg_parser.dry_run: 702 return self.dry_run("ceph " + cmd_json["prefix"]) 703 ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) 704 # if there is an unsuccessful attempt, 705 if ret_val != 0 or len(json_out) == 0: 706 raise ExecutionFailureException( 707 "'quorum_status' command failed.\n" 708 f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}" 709 ) 710 q_leader_name = json_out["quorum_leader_name"] 711 q_leader_details = {} 712 q_leader_matching_list = [ 713 l for l in json_out["monmap"]["mons"] if l["name"] == q_leader_name 714 ] 715 if len(q_leader_matching_list) == 0: 716 raise ExecutionFailureException("No matching 'mon' details found") 717 q_leader_details = q_leader_matching_list[0] 718 # get the address vector of the quorum-leader 719 q_leader_addrvec = q_leader_details.get("public_addrs", {}).get("addrvec", []) 720 ip_addr = str(q_leader_details["public_addr"].split("/")[0]) 721 722 if self._arg_parser.v2_port_enable: 723 if q_leader_addrvec[0]["type"] == "v2": 724 ip_addr = q_leader_addrvec[0]["addr"] 725 elif len(q_leader_addrvec) > 1 and q_leader_addrvec[1]["type"] == "v2": 726 ip_addr = q_leader_addrvec[1]["addr"] 727 else: 728 sys.stderr.write( 729 "'v2' address type not present, and 'v2-port-enable' flag is provided" 730 ) 731 732 return f"{str(q_leader_name)}={ip_addr}" 733 734 def _convert_hostname_to_ip(self, host_name, port, ip_type): 735 # if 'cluster' instance is a dummy type, 736 # call the dummy instance's "convert" method 737 if not host_name: 738 raise ExecutionFailureException("Empty hostname provided") 739 if isinstance(self.cluster, DummyRados): 740 return self.cluster._convert_hostname_to_ip(host_name) 741 742 if ip_type == "FQDN": 743 # check which ip FQDN should be converted to, IPv4 or IPv6 744 # check the host ip, the endpoint ip type would be similar to host ip 745 cmd_json = {"prefix": "orch host ls", "format": "json"} 746 ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) 747 # if there is an unsuccessful attempt, 748 if ret_val != 0 or len(json_out) == 0: 749 raise ExecutionFailureException( 750 "'orch host ls' command failed.\n" 751 f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}" 752 ) 753 host_addr = json_out[0]["addr"] 754 # add :80 sample port in ip_type, as _invalid_endpoint also verify port 755 host_ip_type = self._invalid_endpoint(host_addr + ":80") 756 import socket 757 758 # example output [(<AddressFamily.AF_INET: 2>, <SocketKind.SOCK_STREAM: 1>, 6, '', ('93.184.216.34', 80)), ...] 759 # we need to get 93.184.216.34 so it would be ip[0][4][0] 760 if host_ip_type == "IPv6": 761 ip = socket.getaddrinfo( 762 host_name, port, family=socket.AF_INET6, proto=socket.IPPROTO_TCP 763 ) 764 elif host_ip_type == "IPv4": 765 ip = socket.getaddrinfo( 766 host_name, port, family=socket.AF_INET, proto=socket.IPPROTO_TCP 767 ) 768 del socket 769 return ip[0][4][0] 770 return host_name 771 772 def get_active_and_standby_mgrs(self): 773 if self._arg_parser.dry_run: 774 return "", self.dry_run("ceph status") 775 monitoring_endpoint_port = self._arg_parser.monitoring_endpoint_port 776 monitoring_endpoint_ip_list = self._arg_parser.monitoring_endpoint 777 standby_mgrs = [] 778 if not monitoring_endpoint_ip_list: 779 cmd_json = {"prefix": "status", "format": "json"} 780 ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) 781 # if there is an unsuccessful attempt, 782 if ret_val != 0 or len(json_out) == 0: 783 raise ExecutionFailureException( 784 "'mgr services' command failed.\n" 785 f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}" 786 ) 787 monitoring_endpoint = ( 788 json_out.get("mgrmap", {}).get("services", {}).get("prometheus", "") 789 ) 790 if not monitoring_endpoint: 791 raise ExecutionFailureException( 792 "can't find monitoring_endpoint, prometheus module might not be enabled, " 793 "enable the module by running 'ceph mgr module enable prometheus'" 794 ) 795 # now check the stand-by mgr-s 796 standby_arr = json_out.get("mgrmap", {}).get("standbys", []) 797 for each_standby in standby_arr: 798 if "name" in each_standby.keys(): 799 standby_mgrs.append(each_standby["name"]) 800 try: 801 parsed_endpoint = urlparse(monitoring_endpoint) 802 except ValueError: 803 raise ExecutionFailureException( 804 f"invalid endpoint: {monitoring_endpoint}" 805 ) 806 monitoring_endpoint_ip_list = parsed_endpoint.hostname 807 if not monitoring_endpoint_port: 808 monitoring_endpoint_port = str(parsed_endpoint.port) 809 810 # if monitoring endpoint port is not set, put a default mon port 811 if not monitoring_endpoint_port: 812 monitoring_endpoint_port = self.DEFAULT_MONITORING_ENDPOINT_PORT 813 814 # user could give comma and space separated inputs (like --monitoring-endpoint="<ip1>, <ip2>") 815 monitoring_endpoint_ip_list = monitoring_endpoint_ip_list.replace(",", " ") 816 monitoring_endpoint_ip_list_split = monitoring_endpoint_ip_list.split() 817 # if monitoring-endpoint could not be found, raise an error 818 if len(monitoring_endpoint_ip_list_split) == 0: 819 raise ExecutionFailureException("No 'monitoring-endpoint' found") 820 # first ip is treated as the main monitoring-endpoint 821 monitoring_endpoint_ip = monitoring_endpoint_ip_list_split[0] 822 # rest of the ip-s are added to the 'standby_mgrs' list 823 standby_mgrs.extend(monitoring_endpoint_ip_list_split[1:]) 824 failed_ip = monitoring_endpoint_ip 825 826 monitoring_endpoint = ":".join( 827 [monitoring_endpoint_ip, monitoring_endpoint_port] 828 ) 829 ip_type = self._invalid_endpoint(monitoring_endpoint) 830 try: 831 monitoring_endpoint_ip = self._convert_hostname_to_ip( 832 monitoring_endpoint_ip, monitoring_endpoint_port, ip_type 833 ) 834 # collect all the 'stand-by' mgr ips 835 mgr_ips = [] 836 for each_standby_mgr in standby_mgrs: 837 failed_ip = each_standby_mgr 838 mgr_ips.append( 839 self._convert_hostname_to_ip( 840 each_standby_mgr, monitoring_endpoint_port, ip_type 841 ) 842 ) 843 except: 844 raise ExecutionFailureException( 845 f"Conversion of host: {failed_ip} to IP failed. " 846 "Please enter the IP addresses of all the ceph-mgrs with the '--monitoring-endpoint' flag" 847 ) 848 849 _, _, err = self.endpoint_dial(monitoring_endpoint, ip_type) 850 if err == "-1": 851 raise ExecutionFailureException(err) 852 # add the validated active mgr IP into the first index 853 mgr_ips.insert(0, monitoring_endpoint_ip) 854 all_mgr_ips_str = ",".join(mgr_ips) 855 return all_mgr_ips_str, monitoring_endpoint_port 856 857 def check_user_exist(self, user): 858 cmd_json = {"prefix": "auth get", "entity": f"{user}", "format": "json"} 859 ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json) 860 if ret_val != 0 or len(json_out) == 0: 861 return "" 862 return str(json_out[0]["key"]) 863 864 def get_cephfs_provisioner_caps_and_entity(self): 865 entity = "client.csi-cephfs-provisioner" 866 caps = { 867 "mon": "allow r, allow command 'osd blocklist'", 868 "mgr": "allow rw", 869 "osd": "allow rw tag cephfs metadata=*", 870 } 871 if self._arg_parser.restricted_auth_permission: 872 k8s_cluster_name = self._arg_parser.k8s_cluster_name 873 if k8s_cluster_name == "": 874 raise ExecutionFailureException( 875 "k8s_cluster_name not found, please set the '--k8s-cluster-name' flag" 876 ) 877 cephfs_filesystem = self._arg_parser.cephfs_filesystem_name 878 if cephfs_filesystem == "": 879 entity = f"{entity}-{k8s_cluster_name}" 880 else: 881 entity = f"{entity}-{k8s_cluster_name}-{cephfs_filesystem}" 882 caps["osd"] = f"allow rw tag cephfs metadata={cephfs_filesystem}" 883 884 return caps, entity 885 886 def get_cephfs_node_caps_and_entity(self): 887 entity = "client.csi-cephfs-node" 888 caps = { 889 "mon": "allow r, allow command 'osd blocklist'", 890 "mgr": "allow rw", 891 "osd": "allow rw tag cephfs *=*", 892 "mds": "allow rw", 893 } 894 if self._arg_parser.restricted_auth_permission: 895 k8s_cluster_name = self._arg_parser.k8s_cluster_name 896 if k8s_cluster_name == "": 897 raise ExecutionFailureException( 898 "k8s_cluster_name not found, please set the '--k8s-cluster-name' flag" 899 ) 900 cephfs_filesystem = self._arg_parser.cephfs_filesystem_name 901 if cephfs_filesystem == "": 902 entity = f"{entity}-{k8s_cluster_name}" 903 else: 904 entity = f"{entity}-{k8s_cluster_name}-{cephfs_filesystem}" 905 caps["osd"] = f"allow rw tag cephfs *={cephfs_filesystem}" 906 907 return caps, entity 908 909 def get_entity( 910 self, 911 entity, 912 rbd_pool_name, 913 alias_rbd_pool_name, 914 k8s_cluster_name, 915 rados_namespace, 916 ): 917 if ( 918 rbd_pool_name.count(".") != 0 919 or rbd_pool_name.count("_") != 0 920 or alias_rbd_pool_name != "" 921 # checking alias_rbd_pool_name is not empty as there maybe a special character used other than . or _ 922 ): 923 if alias_rbd_pool_name == "": 924 raise ExecutionFailureException( 925 "please set the '--alias-rbd-data-pool-name' flag as the rbd data pool name contains '.' or '_'" 926 ) 927 if ( 928 alias_rbd_pool_name.count(".") != 0 929 or alias_rbd_pool_name.count("_") != 0 930 ): 931 raise ExecutionFailureException( 932 "'--alias-rbd-data-pool-name' flag value should not contain '.' or '_'" 933 ) 934 entity = f"{entity}-{k8s_cluster_name}-{alias_rbd_pool_name}" 935 else: 936 entity = f"{entity}-{k8s_cluster_name}-{rbd_pool_name}" 937 938 if rados_namespace: 939 entity = f"{entity}-{rados_namespace}" 940 return entity 941 942 def get_rbd_provisioner_caps_and_entity(self): 943 entity = "client.csi-rbd-provisioner" 944 caps = { 945 "mon": "profile rbd, allow command 'osd blocklist'", 946 "mgr": "allow rw", 947 "osd": "profile rbd", 948 } 949 if self._arg_parser.restricted_auth_permission: 950 rbd_pool_name = self._arg_parser.rbd_data_pool_name 951 alias_rbd_pool_name = self._arg_parser.alias_rbd_data_pool_name 952 k8s_cluster_name = self._arg_parser.k8s_cluster_name 953 rados_namespace = self._arg_parser.rados_namespace 954 if rbd_pool_name == "": 955 raise ExecutionFailureException( 956 "mandatory flag not found, please set the '--rbd-data-pool-name' flag" 957 ) 958 if k8s_cluster_name == "": 959 raise ExecutionFailureException( 960 "mandatory flag not found, please set the '--k8s-cluster-name' flag" 961 ) 962 entity = self.get_entity( 963 entity, 964 rbd_pool_name, 965 alias_rbd_pool_name, 966 k8s_cluster_name, 967 rados_namespace, 968 ) 969 if rados_namespace != "": 970 caps["osd"] = ( 971 f"profile rbd pool={rbd_pool_name} namespace={rados_namespace}" 972 ) 973 else: 974 caps["osd"] = f"profile rbd pool={rbd_pool_name}" 975 976 return caps, entity 977 978 def get_rbd_node_caps_and_entity(self): 979 entity = "client.csi-rbd-node" 980 caps = { 981 "mon": "profile rbd, allow command 'osd blocklist'", 982 "osd": "profile rbd", 983 } 984 if self._arg_parser.restricted_auth_permission: 985 rbd_pool_name = self._arg_parser.rbd_data_pool_name 986 alias_rbd_pool_name = self._arg_parser.alias_rbd_data_pool_name 987 k8s_cluster_name = self._arg_parser.k8s_cluster_name 988 rados_namespace = self._arg_parser.rados_namespace 989 if rbd_pool_name == "": 990 raise ExecutionFailureException( 991 "mandatory flag not found, please set the '--rbd-data-pool-name' flag" 992 ) 993 if k8s_cluster_name == "": 994 raise ExecutionFailureException( 995 "mandatory flag not found, please set the '--k8s-cluster-name' flag" 996 ) 997 entity = self.get_entity( 998 entity, 999 rbd_pool_name, 1000 alias_rbd_pool_name, 1001 k8s_cluster_name, 1002 rados_namespace, 1003 ) 1004 if rados_namespace != "": 1005 caps["osd"] = ( 1006 f"profile rbd pool={rbd_pool_name} namespace={rados_namespace}" 1007 ) 1008 else: 1009 caps["osd"] = f"profile rbd pool={rbd_pool_name}" 1010 1011 return caps, entity 1012 1013 def get_defaultUser_caps_and_entity(self): 1014 entity = self.run_as_user 1015 caps = { 1016 "mon": "allow r, allow command quorum_status, allow command version", 1017 "mgr": "allow command config", 1018 "osd": f"profile rbd-read-only, allow rwx pool={self._arg_parser.rgw_pool_prefix}.rgw.meta, allow r pool=.rgw.root, allow rw pool={self._arg_parser.rgw_pool_prefix}.rgw.control, allow rx pool={self._arg_parser.rgw_pool_prefix}.rgw.log, allow x pool={self._arg_parser.rgw_pool_prefix}.rgw.buckets.index", 1019 } 1020 1021 return caps, entity 1022 1023 def get_caps_and_entity(self, user_name): 1024 if "client.csi-cephfs-provisioner" in user_name: 1025 if "client.csi-cephfs-provisioner" != user_name: 1026 self._arg_parser.restricted_auth_permission = True 1027 return self.get_cephfs_provisioner_caps_and_entity() 1028 if "client.csi-cephfs-node" in user_name: 1029 if "client.csi-cephfs-node" != user_name: 1030 self._arg_parser.restricted_auth_permission = True 1031 return self.get_cephfs_node_caps_and_entity() 1032 if "client.csi-rbd-provisioner" in user_name: 1033 if "client.csi-rbd-provisioner" != user_name: 1034 self._arg_parser.restricted_auth_permission = True 1035 return self.get_rbd_provisioner_caps_and_entity() 1036 if "client.csi-rbd-node" in user_name: 1037 if "client.csi-rbd-node" != user_name: 1038 self._arg_parser.restricted_auth_permission = True 1039 return self.get_rbd_node_caps_and_entity() 1040 if "client.healthchecker" in user_name: 1041 if "client.healthchecker" != user_name: 1042 self._arg_parser.restricted_auth_permission = True 1043 return self.get_defaultUser_caps_and_entity() 1044 1045 raise ExecutionFailureException( 1046 f"no user found with user_name: {user_name}, " 1047 "get_caps_and_entity command failed.\n" 1048 ) 1049 1050 def create_cephCSIKeyring_user(self, user): 1051 """ 1052 command: ceph auth get-or-create client.csi-cephfs-provisioner mon 'allow r' mgr 'allow rw' osd 'allow rw tag cephfs metadata=*' 1053 """ 1054 caps, entity = self.get_caps_and_entity(user) 1055 cmd_json = { 1056 "prefix": "auth get-or-create", 1057 "entity": entity, 1058 "caps": [cap for cap_list in list(caps.items()) for cap in cap_list], 1059 "format": "json", 1060 } 1061 1062 if self._arg_parser.dry_run: 1063 return ( 1064 self.dry_run( 1065 "ceph " 1066 + cmd_json["prefix"] 1067 + " " 1068 + cmd_json["entity"] 1069 + " " 1070 + " ".join(cmd_json["caps"]) 1071 ), 1072 "", 1073 ) 1074 # check if user already exist 1075 user_key = self.check_user_exist(entity) 1076 if user_key != "": 1077 return user_key, f"{entity.split('.', 1)[1]}" 1078 # entity.split('.',1)[1] to rename entity(client.csi-rbd-node) as csi-rbd-node 1079 1080 ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) 1081 # if there is an unsuccessful attempt, 1082 if ret_val != 0 or len(json_out) == 0: 1083 raise ExecutionFailureException( 1084 f"'auth get-or-create {user}' command failed.\n" 1085 f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}" 1086 ) 1087 return str(json_out[0]["key"]), f"{entity.split('.', 1)[1]}" 1088 # entity.split('.',1)[1] to rename entity(client.csi-rbd-node) as csi-rbd-node 1089 1090 def get_cephfs_data_pool_details(self): 1091 cmd_json = {"prefix": "fs ls", "format": "json"} 1092 if self._arg_parser.dry_run: 1093 return self.dry_run("ceph " + cmd_json["prefix"]) 1094 ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) 1095 # if there is an unsuccessful attempt, report an error 1096 if ret_val != 0: 1097 # if fs and data_pool arguments are not set, silently return 1098 if ( 1099 self._arg_parser.cephfs_filesystem_name == "" 1100 and self._arg_parser.cephfs_data_pool_name == "" 1101 ): 1102 return 1103 # if user has provided any of the 1104 # '--cephfs-filesystem-name' or '--cephfs-data-pool-name' arguments, 1105 # raise an exception as we are unable to verify the args 1106 raise ExecutionFailureException( 1107 f"'fs ls' ceph call failed with error: {err_msg}" 1108 ) 1109 1110 matching_json_out = {} 1111 # if '--cephfs-filesystem-name' argument is provided, 1112 # check whether the provided filesystem-name exists or not 1113 if self._arg_parser.cephfs_filesystem_name: 1114 # get the matching list 1115 matching_json_out_list = [ 1116 matched 1117 for matched in json_out 1118 if str(matched["name"]) == self._arg_parser.cephfs_filesystem_name 1119 ] 1120 # unable to find a matching fs-name, raise an error 1121 if len(matching_json_out_list) == 0: 1122 raise ExecutionFailureException( 1123 f"Filesystem provided, '{self._arg_parser.cephfs_filesystem_name}', " 1124 f"is not found in the fs-list: {[str(x['name']) for x in json_out]}" 1125 ) 1126 matching_json_out = matching_json_out_list[0] 1127 # if cephfs filesystem name is not provided, 1128 # try to get a default fs name by doing the following 1129 else: 1130 # a. check if there is only one filesystem is present 1131 if len(json_out) == 1: 1132 matching_json_out = json_out[0] 1133 # b. or else, check if data_pool name is provided 1134 elif self._arg_parser.cephfs_data_pool_name: 1135 # and if present, check whether there exists a fs which has the data_pool 1136 for eachJ in json_out: 1137 if self._arg_parser.cephfs_data_pool_name in eachJ["data_pools"]: 1138 matching_json_out = eachJ 1139 break 1140 # if there is no matching fs exists, that means provided data_pool name is invalid 1141 if not matching_json_out: 1142 raise ExecutionFailureException( 1143 f"Provided data_pool name, {self._arg_parser.cephfs_data_pool_name}," 1144 " does not exists" 1145 ) 1146 # c. if nothing is set and couldn't find a default, 1147 else: 1148 # just return silently 1149 return 1150 1151 if matching_json_out: 1152 self._arg_parser.cephfs_filesystem_name = str(matching_json_out["name"]) 1153 self._arg_parser.cephfs_metadata_pool_name = str( 1154 matching_json_out["metadata_pool"] 1155 ) 1156 1157 if isinstance(matching_json_out["data_pools"], list): 1158 # if the user has already provided data-pool-name, 1159 # through --cephfs-data-pool-name 1160 if self._arg_parser.cephfs_data_pool_name: 1161 # if the provided name is not matching with the one in the list 1162 if ( 1163 self._arg_parser.cephfs_data_pool_name 1164 not in matching_json_out["data_pools"] 1165 ): 1166 raise ExecutionFailureException( 1167 f"Provided data-pool-name: '{self._arg_parser.cephfs_data_pool_name}', " 1168 "doesn't match from the data-pools list: " 1169 f"{[str(x) for x in matching_json_out['data_pools']]}" 1170 ) 1171 # if data_pool name is not provided, 1172 # then try to find a default data pool name 1173 else: 1174 # if no data_pools exist, silently return 1175 if len(matching_json_out["data_pools"]) == 0: 1176 return 1177 self._arg_parser.cephfs_data_pool_name = str( 1178 matching_json_out["data_pools"][0] 1179 ) 1180 # if there are more than one 'data_pools' exist, 1181 # then warn the user that we are using the selected name 1182 if len(matching_json_out["data_pools"]) > 1: 1183 print( 1184 "WARNING: Multiple data pools detected: " 1185 f"{[str(x) for x in matching_json_out['data_pools']]}\n" 1186 f"Using the data-pool: '{self._arg_parser.cephfs_data_pool_name}'\n" 1187 ) 1188 1189 def create_checkerKey(self, user): 1190 caps, entity = self.get_caps_and_entity(user) 1191 cmd_json = { 1192 "prefix": "auth get-or-create", 1193 "entity": entity, 1194 "caps": [cap for cap_list in list(caps.items()) for cap in cap_list], 1195 "format": "json", 1196 } 1197 1198 if self._arg_parser.dry_run: 1199 return self.dry_run( 1200 "ceph " 1201 + cmd_json["prefix"] 1202 + " " 1203 + cmd_json["entity"] 1204 + " " 1205 + " ".join(cmd_json["caps"]) 1206 ) 1207 # check if user already exist 1208 user_key = self.check_user_exist(entity) 1209 if user_key != "": 1210 return user_key 1211 1212 ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) 1213 # if there is an unsuccessful attempt, 1214 if ret_val != 0 or len(json_out) == 0: 1215 raise ExecutionFailureException( 1216 f"'auth get-or-create {self.run_as_user}' command failed\n" 1217 f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}" 1218 ) 1219 return str(json_out[0]["key"]) 1220 1221 def get_ceph_dashboard_link(self): 1222 cmd_json = {"prefix": "mgr services", "format": "json"} 1223 if self._arg_parser.dry_run: 1224 return self.dry_run("ceph " + cmd_json["prefix"]) 1225 ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json) 1226 # if there is an unsuccessful attempt, 1227 if ret_val != 0 or len(json_out) == 0: 1228 return None 1229 if "dashboard" not in json_out: 1230 return None 1231 return json_out["dashboard"] 1232 1233 def create_rgw_admin_ops_user(self): 1234 cmd = [ 1235 "radosgw-admin", 1236 "user", 1237 "create", 1238 "--uid", 1239 self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME, 1240 "--display-name", 1241 "Rook RGW Admin Ops user", 1242 "--caps", 1243 "buckets=*;users=*;usage=read;metadata=read;zone=read", 1244 "--rgw-realm", 1245 self._arg_parser.rgw_realm_name, 1246 "--rgw-zonegroup", 1247 self._arg_parser.rgw_zonegroup_name, 1248 "--rgw-zone", 1249 self._arg_parser.rgw_zone_name, 1250 ] 1251 if self._arg_parser.dry_run: 1252 return self.dry_run("ceph " + " ".join(cmd)) 1253 try: 1254 output = subprocess.check_output(cmd, stderr=subprocess.PIPE) 1255 except subprocess.CalledProcessError as execErr: 1256 # if the user already exists, we just query it 1257 if execErr.returncode == errno.EEXIST: 1258 cmd = [ 1259 "radosgw-admin", 1260 "user", 1261 "info", 1262 "--uid", 1263 self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME, 1264 "--rgw-realm", 1265 self._arg_parser.rgw_realm_name, 1266 "--rgw-zonegroup", 1267 self._arg_parser.rgw_zonegroup_name, 1268 "--rgw-zone", 1269 self._arg_parser.rgw_zone_name, 1270 ] 1271 try: 1272 output = subprocess.check_output(cmd, stderr=subprocess.PIPE) 1273 except subprocess.CalledProcessError as execErr: 1274 err_msg = ( 1275 f"failed to execute command {cmd}. Output: {execErr.output}. " 1276 f"Code: {execErr.returncode}. Error: {execErr.stderr}" 1277 ) 1278 sys.stderr.write(err_msg) 1279 return None, None, False, "-1" 1280 else: 1281 err_msg = ( 1282 f"failed to execute command {cmd}. Output: {execErr.output}. " 1283 f"Code: {execErr.returncode}. Error: {execErr.stderr}" 1284 ) 1285 sys.stderr.write(err_msg) 1286 return None, None, False, "-1" 1287 1288 # if it is python2, don't check for ceph version for adding `info=read` cap(rgw_validation) 1289 if sys.version_info.major < 3: 1290 jsonoutput = json.loads(output) 1291 return ( 1292 jsonoutput["keys"][0]["access_key"], 1293 jsonoutput["keys"][0]["secret_key"], 1294 False, 1295 "", 1296 ) 1297 1298 # separately add info=read caps for rgw-endpoint ip validation 1299 info_cap_supported = True 1300 cmd = [ 1301 "radosgw-admin", 1302 "caps", 1303 "add", 1304 "--uid", 1305 self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME, 1306 "--caps", 1307 "info=read", 1308 "--rgw-realm", 1309 self._arg_parser.rgw_realm_name, 1310 "--rgw-zonegroup", 1311 self._arg_parser.rgw_zonegroup_name, 1312 "--rgw-zone", 1313 self._arg_parser.rgw_zone_name, 1314 ] 1315 try: 1316 output = subprocess.check_output(cmd, stderr=subprocess.PIPE) 1317 except subprocess.CalledProcessError as execErr: 1318 # if the ceph version not supported for adding `info=read` cap(rgw_validation) 1319 if ( 1320 "could not add caps: unable to add caps: info=read\n" 1321 in execErr.stderr.decode("utf-8") 1322 and execErr.returncode == 244 1323 ): 1324 info_cap_supported = False 1325 else: 1326 err_msg = ( 1327 f"failed to execute command {cmd}. Output: {execErr.output}. " 1328 f"Code: {execErr.returncode}. Error: {execErr.stderr}" 1329 ) 1330 sys.stderr.write(err_msg) 1331 return None, None, False, "-1" 1332 1333 jsonoutput = json.loads(output) 1334 return ( 1335 jsonoutput["keys"][0]["access_key"], 1336 jsonoutput["keys"][0]["secret_key"], 1337 info_cap_supported, 1338 "", 1339 ) 1340 1341 def validate_rbd_pool(self, pool_name): 1342 if not self.cluster.pool_exists(pool_name): 1343 raise ExecutionFailureException( 1344 f"The provided pool, '{pool_name}', does not exist" 1345 ) 1346 1347 def init_rbd_pool(self, rbd_pool_name): 1348 if isinstance(self.cluster, DummyRados): 1349 return 1350 ioctx = self.cluster.open_ioctx(rbd_pool_name) 1351 rbd_inst = rbd.RBD() 1352 rbd_inst.pool_init(ioctx, True) 1353 1354 def validate_rados_namespace(self): 1355 rbd_pool_name = self._arg_parser.rbd_data_pool_name 1356 rados_namespace = self._arg_parser.rados_namespace 1357 if rados_namespace == "": 1358 return 1359 if rados_namespace.islower() == False: 1360 raise ExecutionFailureException( 1361 f"The provided rados Namespace, '{rados_namespace}', " 1362 f"contains upper case" 1363 ) 1364 rbd_inst = rbd.RBD() 1365 ioctx = self.cluster.open_ioctx(rbd_pool_name) 1366 if rbd_inst.namespace_exists(ioctx, rados_namespace) is False: 1367 raise ExecutionFailureException( 1368 f"The provided rados Namespace, '{rados_namespace}', " 1369 f"is not found in the pool '{rbd_pool_name}'" 1370 ) 1371 1372 def get_or_create_subvolume_group(self, subvolume_group, cephfs_filesystem_name): 1373 cmd = [ 1374 "ceph", 1375 "fs", 1376 "subvolumegroup", 1377 "getpath", 1378 cephfs_filesystem_name, 1379 subvolume_group, 1380 ] 1381 try: 1382 _ = subprocess.check_output(cmd, stderr=subprocess.PIPE) 1383 except subprocess.CalledProcessError: 1384 cmd = [ 1385 "ceph", 1386 "fs", 1387 "subvolumegroup", 1388 "create", 1389 cephfs_filesystem_name, 1390 subvolume_group, 1391 ] 1392 try: 1393 _ = subprocess.check_output(cmd, stderr=subprocess.PIPE) 1394 except subprocess.CalledProcessError: 1395 raise ExecutionFailureException( 1396 f"subvolume group {subvolume_group} is not able to get created" 1397 ) 1398 1399 def pin_subvolume( 1400 self, subvolume_group, cephfs_filesystem_name, pin_type, pin_setting 1401 ): 1402 cmd = [ 1403 "ceph", 1404 "fs", 1405 "subvolumegroup", 1406 "pin", 1407 cephfs_filesystem_name, 1408 subvolume_group, 1409 pin_type, 1410 pin_setting, 1411 ] 1412 try: 1413 _ = subprocess.check_output(cmd, stderr=subprocess.PIPE) 1414 except subprocess.CalledProcessError: 1415 raise ExecutionFailureException( 1416 f"subvolume group {subvolume_group} is not able to get pinned" 1417 ) 1418 1419 def get_rgw_fsid(self, base_url, verify): 1420 access_key = self.out_map["RGW_ADMIN_OPS_USER_ACCESS_KEY"] 1421 secret_key = self.out_map["RGW_ADMIN_OPS_USER_SECRET_KEY"] 1422 rgw_endpoint = self._arg_parser.rgw_endpoint 1423 base_url = base_url + "://" + rgw_endpoint + "/admin/info?" 1424 params = {"format": "json"} 1425 request_url = base_url + urlencode(params) 1426 1427 try: 1428 r = requests.get( 1429 request_url, 1430 auth=S3Auth(access_key, secret_key, rgw_endpoint), 1431 verify=verify, 1432 ) 1433 except requests.exceptions.Timeout: 1434 sys.stderr.write( 1435 f"invalid endpoint:, not able to call admin-ops api{rgw_endpoint}" 1436 ) 1437 return "", "-1" 1438 r1 = r.json() 1439 if r1 is None or r1.get("info") is None: 1440 sys.stderr.write( 1441 f"The provided rgw Endpoint, '{self._arg_parser.rgw_endpoint}', is invalid." 1442 ) 1443 return ( 1444 "", 1445 "-1", 1446 ) 1447 1448 return r1["info"]["storage_backends"][0]["cluster_id"], "" 1449 1450 def validate_rgw_endpoint(self, info_cap_supported): 1451 # if the 'cluster' instance is a dummy one, 1452 # don't try to reach out to the endpoint 1453 if isinstance(self.cluster, DummyRados): 1454 return 1455 1456 rgw_endpoint = self._arg_parser.rgw_endpoint 1457 1458 # validate rgw endpoint only if ip address is passed 1459 ip_type = self._invalid_endpoint(rgw_endpoint) 1460 1461 # check if the rgw endpoint is reachable 1462 cert = None 1463 if not self._arg_parser.rgw_skip_tls and self.validate_rgw_endpoint_tls_cert(): 1464 cert = self._arg_parser.rgw_tls_cert_path 1465 base_url, verify, err = self.endpoint_dial(rgw_endpoint, ip_type, cert=cert) 1466 if err != "": 1467 return "-1" 1468 1469 # check if the rgw endpoint belongs to the same cluster 1470 # only check if `info` cap is supported 1471 if info_cap_supported: 1472 fsid = self.get_fsid() 1473 rgw_fsid, err = self.get_rgw_fsid(base_url, verify) 1474 if err == "-1": 1475 return "-1" 1476 if fsid != rgw_fsid: 1477 sys.stderr.write( 1478 f"The provided rgw Endpoint, '{self._arg_parser.rgw_endpoint}', is invalid. We are validating by calling the adminops api through rgw-endpoint and validating the cluster_id '{rgw_fsid}' is equal to the ceph cluster fsid '{fsid}'" 1479 ) 1480 return "-1" 1481 1482 # check if the rgw endpoint pool exist 1483 # only validate if rgw_pool_prefix is passed else it will take default value and we don't create these default pools 1484 if self._arg_parser.rgw_pool_prefix != "default": 1485 rgw_pools_to_validate = [ 1486 f"{self._arg_parser.rgw_pool_prefix}.rgw.meta", 1487 ".rgw.root", 1488 f"{self._arg_parser.rgw_pool_prefix}.rgw.control", 1489 f"{self._arg_parser.rgw_pool_prefix}.rgw.log", 1490 ] 1491 for _rgw_pool_to_validate in rgw_pools_to_validate: 1492 if not self.cluster.pool_exists(_rgw_pool_to_validate): 1493 sys.stderr.write( 1494 f"The provided pool, '{_rgw_pool_to_validate}', does not exist" 1495 ) 1496 return "-1" 1497 1498 return "" 1499 1500 def validate_rgw_multisite(self, rgw_multisite_config_name, rgw_multisite_config): 1501 if rgw_multisite_config != "": 1502 cmd = [ 1503 "radosgw-admin", 1504 rgw_multisite_config, 1505 "get", 1506 "--rgw-" + rgw_multisite_config, 1507 rgw_multisite_config_name, 1508 ] 1509 try: 1510 _ = subprocess.check_output(cmd, stderr=subprocess.PIPE) 1511 except subprocess.CalledProcessError as execErr: 1512 err_msg = ( 1513 f"failed to execute command {cmd}. Output: {execErr.output}. " 1514 f"Code: {execErr.returncode}. Error: {execErr.stderr}" 1515 ) 1516 sys.stderr.write(err_msg) 1517 return "-1" 1518 return "" 1519 1520 def convert_comma_separated_to_array(self, value): 1521 return value.split(",") 1522 1523 def raise_exception_if_any_topology_flag_is_missing(self): 1524 if ( 1525 ( 1526 self._arg_parser.topology_pools != "" 1527 and ( 1528 self._arg_parser.topology_failure_domain_label == "" 1529 or self._arg_parser.topology_failure_domain_values == "" 1530 ) 1531 ) 1532 or ( 1533 self._arg_parser.topology_failure_domain_label != "" 1534 and ( 1535 self._arg_parser.topology_pools == "" 1536 or self._arg_parser.topology_failure_domain_values == "" 1537 ) 1538 ) 1539 or ( 1540 self._arg_parser.topology_failure_domain_values != "" 1541 and ( 1542 self._arg_parser.topology_pools == "" 1543 or self._arg_parser.topology_failure_domain_label == "" 1544 ) 1545 ) 1546 ): 1547 raise ExecutionFailureException( 1548 "provide all the topology flags --topology-pools, --topology-failure-domain-label, --topology-failure-domain-values" 1549 ) 1550 1551 def validate_topology_values(self, topology_pools, topology_fd): 1552 if len(topology_pools) != len(topology_fd): 1553 raise ExecutionFailureException( 1554 f"The provided topology pools, '{topology_pools}', and " 1555 f"topology failure domain, '{topology_fd}'," 1556 f"are of different length, '{len(topology_pools)}' and '{len(topology_fd)}' respctively" 1557 ) 1558 return 1559 1560 def validate_topology_rbd_pools(self, topology_rbd_pools): 1561 for pool in topology_rbd_pools: 1562 self.validate_rbd_pool(pool) 1563 1564 def init_topology_rbd_pools(self, topology_rbd_pools): 1565 for pool in topology_rbd_pools: 1566 self.init_rbd_pool(pool) 1567 1568 def _gen_output_map(self): 1569 if self.out_map: 1570 return 1571 # support legacy flag with upgrades 1572 if self._arg_parser.cluster_name: 1573 self._arg_parser.k8s_cluster_name = self._arg_parser.cluster_name 1574 self._arg_parser.k8s_cluster_name = ( 1575 self._arg_parser.k8s_cluster_name.lower() 1576 ) # always convert cluster name to lowercase characters 1577 self.validate_rbd_pool(self._arg_parser.rbd_data_pool_name) 1578 self.init_rbd_pool(self._arg_parser.rbd_data_pool_name) 1579 self.validate_rados_namespace() 1580 self._excluded_keys.add("K8S_CLUSTER_NAME") 1581 self.get_cephfs_data_pool_details() 1582 self.out_map["NAMESPACE"] = self._arg_parser.namespace 1583 self.out_map["K8S_CLUSTER_NAME"] = self._arg_parser.k8s_cluster_name 1584 self.out_map["ROOK_EXTERNAL_FSID"] = self.get_fsid() 1585 self.out_map["ROOK_EXTERNAL_USERNAME"] = self.run_as_user 1586 self.out_map["ROOK_EXTERNAL_CEPH_MON_DATA"] = self.get_ceph_external_mon_data() 1587 self.out_map["ROOK_EXTERNAL_USER_SECRET"] = self.create_checkerKey( 1588 "client.healthchecker" 1589 ) 1590 self.out_map["ROOK_EXTERNAL_DASHBOARD_LINK"] = self.get_ceph_dashboard_link() 1591 ( 1592 self.out_map["CSI_RBD_NODE_SECRET"], 1593 self.out_map["CSI_RBD_NODE_SECRET_NAME"], 1594 ) = self.create_cephCSIKeyring_user("client.csi-rbd-node") 1595 ( 1596 self.out_map["CSI_RBD_PROVISIONER_SECRET"], 1597 self.out_map["CSI_RBD_PROVISIONER_SECRET_NAME"], 1598 ) = self.create_cephCSIKeyring_user("client.csi-rbd-provisioner") 1599 self.out_map["CEPHFS_POOL_NAME"] = self._arg_parser.cephfs_data_pool_name 1600 self.out_map["CEPHFS_METADATA_POOL_NAME"] = ( 1601 self._arg_parser.cephfs_metadata_pool_name 1602 ) 1603 self.out_map["CEPHFS_FS_NAME"] = self._arg_parser.cephfs_filesystem_name 1604 self.out_map["RESTRICTED_AUTH_PERMISSION"] = ( 1605 self._arg_parser.restricted_auth_permission 1606 ) 1607 self.out_map["RADOS_NAMESPACE"] = self._arg_parser.rados_namespace 1608 self.out_map["SUBVOLUME_GROUP"] = self._arg_parser.subvolume_group 1609 self.out_map["CSI_CEPHFS_NODE_SECRET"] = "" 1610 self.out_map["CSI_CEPHFS_PROVISIONER_SECRET"] = "" 1611 # create CephFS node and provisioner keyring only when MDS exists 1612 if self.out_map["CEPHFS_FS_NAME"] and self.out_map["CEPHFS_POOL_NAME"]: 1613 ( 1614 self.out_map["CSI_CEPHFS_NODE_SECRET"], 1615 self.out_map["CSI_CEPHFS_NODE_SECRET_NAME"], 1616 ) = self.create_cephCSIKeyring_user("client.csi-cephfs-node") 1617 ( 1618 self.out_map["CSI_CEPHFS_PROVISIONER_SECRET"], 1619 self.out_map["CSI_CEPHFS_PROVISIONER_SECRET_NAME"], 1620 ) = self.create_cephCSIKeyring_user("client.csi-cephfs-provisioner") 1621 # create the default "csi" subvolumegroup 1622 self.get_or_create_subvolume_group( 1623 "csi", self._arg_parser.cephfs_filesystem_name 1624 ) 1625 # pin the default "csi" subvolumegroup 1626 self.pin_subvolume( 1627 "csi", self._arg_parser.cephfs_filesystem_name, "distributed", "1" 1628 ) 1629 if self.out_map["SUBVOLUME_GROUP"]: 1630 self.get_or_create_subvolume_group( 1631 self._arg_parser.subvolume_group, 1632 self._arg_parser.cephfs_filesystem_name, 1633 ) 1634 self.pin_subvolume( 1635 self._arg_parser.subvolume_group, 1636 self._arg_parser.cephfs_filesystem_name, 1637 "distributed", 1638 "1", 1639 ) 1640 self.out_map["RGW_TLS_CERT"] = "" 1641 self.out_map["MONITORING_ENDPOINT"] = "" 1642 self.out_map["MONITORING_ENDPOINT_PORT"] = "" 1643 if not self._arg_parser.skip_monitoring_endpoint: 1644 ( 1645 self.out_map["MONITORING_ENDPOINT"], 1646 self.out_map["MONITORING_ENDPOINT_PORT"], 1647 ) = self.get_active_and_standby_mgrs() 1648 self.out_map["RBD_POOL_NAME"] = self._arg_parser.rbd_data_pool_name 1649 self.out_map["RBD_METADATA_EC_POOL_NAME"] = ( 1650 self.validate_rbd_metadata_ec_pool_name() 1651 ) 1652 self.out_map["TOPOLOGY_POOLS"] = self._arg_parser.topology_pools 1653 self.out_map["TOPOLOGY_FAILURE_DOMAIN_LABEL"] = ( 1654 self._arg_parser.topology_failure_domain_label 1655 ) 1656 self.out_map["TOPOLOGY_FAILURE_DOMAIN_VALUES"] = ( 1657 self._arg_parser.topology_failure_domain_values 1658 ) 1659 if ( 1660 self._arg_parser.topology_pools != "" 1661 and self._arg_parser.topology_failure_domain_label != "" 1662 and self._arg_parser.topology_failure_domain_values != "" 1663 ): 1664 self.validate_topology_values( 1665 self.convert_comma_separated_to_array(self.out_map["TOPOLOGY_POOLS"]), 1666 self.convert_comma_separated_to_array( 1667 self.out_map["TOPOLOGY_FAILURE_DOMAIN_VALUES"] 1668 ), 1669 ) 1670 self.validate_topology_rbd_pools( 1671 self.convert_comma_separated_to_array(self.out_map["TOPOLOGY_POOLS"]) 1672 ) 1673 self.init_topology_rbd_pools( 1674 self.convert_comma_separated_to_array(self.out_map["TOPOLOGY_POOLS"]) 1675 ) 1676 else: 1677 self.raise_exception_if_any_topology_flag_is_missing() 1678 1679 self.out_map["RGW_POOL_PREFIX"] = self._arg_parser.rgw_pool_prefix 1680 self.out_map["RGW_ENDPOINT"] = "" 1681 if self._arg_parser.rgw_endpoint: 1682 if self._arg_parser.dry_run: 1683 self.create_rgw_admin_ops_user() 1684 else: 1685 if ( 1686 self._arg_parser.rgw_realm_name != "" 1687 and self._arg_parser.rgw_zonegroup_name != "" 1688 and self._arg_parser.rgw_zone_name != "" 1689 ): 1690 err = self.validate_rgw_multisite( 1691 self._arg_parser.rgw_realm_name, "realm" 1692 ) 1693 err = self.validate_rgw_multisite( 1694 self._arg_parser.rgw_zonegroup_name, "zonegroup" 1695 ) 1696 err = self.validate_rgw_multisite( 1697 self._arg_parser.rgw_zone_name, "zone" 1698 ) 1699 1700 if ( 1701 self._arg_parser.rgw_realm_name == "" 1702 and self._arg_parser.rgw_zonegroup_name == "" 1703 and self._arg_parser.rgw_zone_name == "" 1704 ) or ( 1705 self._arg_parser.rgw_realm_name != "" 1706 and self._arg_parser.rgw_zonegroup_name != "" 1707 and self._arg_parser.rgw_zone_name != "" 1708 ): 1709 ( 1710 self.out_map["RGW_ADMIN_OPS_USER_ACCESS_KEY"], 1711 self.out_map["RGW_ADMIN_OPS_USER_SECRET_KEY"], 1712 info_cap_supported, 1713 err, 1714 ) = self.create_rgw_admin_ops_user() 1715 err = self.validate_rgw_endpoint(info_cap_supported) 1716 if self._arg_parser.rgw_tls_cert_path: 1717 self.out_map["RGW_TLS_CERT"] = ( 1718 self.validate_rgw_endpoint_tls_cert() 1719 ) 1720 # if there is no error, set the RGW_ENDPOINT 1721 if err != "-1": 1722 self.out_map["RGW_ENDPOINT"] = self._arg_parser.rgw_endpoint 1723 else: 1724 err = "Please provide all the RGW multisite parameters or none of them" 1725 sys.stderr.write(err) 1726 1727 def gen_shell_out(self): 1728 self._gen_output_map() 1729 shOutIO = StringIO() 1730 for k, v in self.out_map.items(): 1731 if v and k not in self._excluded_keys: 1732 shOutIO.write(f"export {k}={v}{LINESEP}") 1733 shOut = shOutIO.getvalue() 1734 shOutIO.close() 1735 return shOut 1736 1737 def gen_json_out(self): 1738 self._gen_output_map() 1739 if self._arg_parser.dry_run: 1740 return "" 1741 json_out = [ 1742 { 1743 "name": "rook-ceph-mon-endpoints", 1744 "kind": "ConfigMap", 1745 "data": { 1746 "data": self.out_map["ROOK_EXTERNAL_CEPH_MON_DATA"], 1747 "maxMonId": "0", 1748 "mapping": "{}", 1749 }, 1750 }, 1751 { 1752 "name": "rook-ceph-mon", 1753 "kind": "Secret", 1754 "data": { 1755 "admin-secret": "admin-secret", 1756 "fsid": self.out_map["ROOK_EXTERNAL_FSID"], 1757 "mon-secret": "mon-secret", 1758 }, 1759 }, 1760 { 1761 "name": "rook-ceph-operator-creds", 1762 "kind": "Secret", 1763 "data": { 1764 "userID": self.out_map["ROOK_EXTERNAL_USERNAME"], 1765 "userKey": self.out_map["ROOK_EXTERNAL_USER_SECRET"], 1766 }, 1767 }, 1768 ] 1769 1770 # if 'MONITORING_ENDPOINT' exists, then only add 'monitoring-endpoint' to Cluster 1771 if ( 1772 self.out_map["MONITORING_ENDPOINT"] 1773 and self.out_map["MONITORING_ENDPOINT_PORT"] 1774 ): 1775 json_out.append( 1776 { 1777 "name": "monitoring-endpoint", 1778 "kind": "CephCluster", 1779 "data": { 1780 "MonitoringEndpoint": self.out_map["MONITORING_ENDPOINT"], 1781 "MonitoringPort": self.out_map["MONITORING_ENDPOINT_PORT"], 1782 }, 1783 } 1784 ) 1785 1786 # if 'CSI_RBD_NODE_SECRET' exists, then only add 'rook-csi-rbd-provisioner' Secret 1787 if ( 1788 self.out_map["CSI_RBD_NODE_SECRET"] 1789 and self.out_map["CSI_RBD_NODE_SECRET_NAME"] 1790 ): 1791 json_out.append( 1792 { 1793 "name": f"rook-{self.out_map['CSI_RBD_NODE_SECRET_NAME']}", 1794 "kind": "Secret", 1795 "data": { 1796 "userID": self.out_map["CSI_RBD_NODE_SECRET_NAME"], 1797 "userKey": self.out_map["CSI_RBD_NODE_SECRET"], 1798 }, 1799 } 1800 ) 1801 # if 'CSI_RBD_PROVISIONER_SECRET' exists, then only add 'rook-csi-rbd-provisioner' Secret 1802 if ( 1803 self.out_map["CSI_RBD_PROVISIONER_SECRET"] 1804 and self.out_map["CSI_RBD_PROVISIONER_SECRET_NAME"] 1805 ): 1806 json_out.append( 1807 { 1808 "name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}", 1809 "kind": "Secret", 1810 "data": { 1811 "userID": self.out_map["CSI_RBD_PROVISIONER_SECRET_NAME"], 1812 "userKey": self.out_map["CSI_RBD_PROVISIONER_SECRET"], 1813 }, 1814 } 1815 ) 1816 # if 'CSI_CEPHFS_PROVISIONER_SECRET' exists, then only add 'rook-csi-cephfs-provisioner' Secret 1817 if ( 1818 self.out_map["CSI_CEPHFS_PROVISIONER_SECRET"] 1819 and self.out_map["CSI_CEPHFS_PROVISIONER_SECRET_NAME"] 1820 ): 1821 json_out.append( 1822 { 1823 "name": f"rook-{self.out_map['CSI_CEPHFS_PROVISIONER_SECRET_NAME']}", 1824 "kind": "Secret", 1825 "data": { 1826 "adminID": self.out_map["CSI_CEPHFS_PROVISIONER_SECRET_NAME"], 1827 "adminKey": self.out_map["CSI_CEPHFS_PROVISIONER_SECRET"], 1828 }, 1829 } 1830 ) 1831 # if 'CSI_CEPHFS_NODE_SECRET' exists, then only add 'rook-csi-cephfs-node' Secret 1832 if ( 1833 self.out_map["CSI_CEPHFS_NODE_SECRET"] 1834 and self.out_map["CSI_CEPHFS_NODE_SECRET_NAME"] 1835 ): 1836 json_out.append( 1837 { 1838 "name": f"rook-{self.out_map['CSI_CEPHFS_NODE_SECRET_NAME']}", 1839 "kind": "Secret", 1840 "data": { 1841 "adminID": self.out_map["CSI_CEPHFS_NODE_SECRET_NAME"], 1842 "adminKey": self.out_map["CSI_CEPHFS_NODE_SECRET"], 1843 }, 1844 } 1845 ) 1846 # if 'ROOK_EXTERNAL_DASHBOARD_LINK' exists, then only add 'rook-ceph-dashboard-link' Secret 1847 if self.out_map["ROOK_EXTERNAL_DASHBOARD_LINK"]: 1848 json_out.append( 1849 { 1850 "name": "rook-ceph-dashboard-link", 1851 "kind": "Secret", 1852 "data": { 1853 "userID": "ceph-dashboard-link", 1854 "userKey": self.out_map["ROOK_EXTERNAL_DASHBOARD_LINK"], 1855 }, 1856 } 1857 ) 1858 # if 'RADOS_NAMESPACE' exists, then only add the "RADOS_NAMESPACE" namespace 1859 if ( 1860 self.out_map["RADOS_NAMESPACE"] 1861 and self.out_map["RESTRICTED_AUTH_PERMISSION"] 1862 and not self.out_map["RBD_METADATA_EC_POOL_NAME"] 1863 ): 1864 json_out.append( 1865 { 1866 "name": "rados-namespace", 1867 "kind": "CephBlockPoolRadosNamespace", 1868 "data": { 1869 "radosNamespaceName": self.out_map["RADOS_NAMESPACE"], 1870 "pool": self.out_map["RBD_POOL_NAME"], 1871 }, 1872 } 1873 ) 1874 json_out.append( 1875 { 1876 "name": "ceph-rbd-rados-namespace", 1877 "kind": "StorageClass", 1878 "data": { 1879 "pool": self.out_map["RBD_POOL_NAME"], 1880 "csi.storage.k8s.io/provisioner-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}", 1881 "csi.storage.k8s.io/controller-expand-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}", 1882 "csi.storage.k8s.io/node-stage-secret-name": f"rook-{self.out_map['CSI_RBD_NODE_SECRET_NAME']}", 1883 }, 1884 } 1885 ) 1886 else: 1887 if self.out_map["RBD_METADATA_EC_POOL_NAME"]: 1888 json_out.append( 1889 { 1890 "name": "ceph-rbd", 1891 "kind": "StorageClass", 1892 "data": { 1893 "dataPool": self.out_map["RBD_POOL_NAME"], 1894 "pool": self.out_map["RBD_METADATA_EC_POOL_NAME"], 1895 "csi.storage.k8s.io/provisioner-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}", 1896 "csi.storage.k8s.io/controller-expand-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}", 1897 "csi.storage.k8s.io/node-stage-secret-name": f"rook-{self.out_map['CSI_RBD_NODE_SECRET_NAME']}", 1898 }, 1899 } 1900 ) 1901 else: 1902 json_out.append( 1903 { 1904 "name": "ceph-rbd", 1905 "kind": "StorageClass", 1906 "data": { 1907 "pool": self.out_map["RBD_POOL_NAME"], 1908 "csi.storage.k8s.io/provisioner-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}", 1909 "csi.storage.k8s.io/controller-expand-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}", 1910 "csi.storage.k8s.io/node-stage-secret-name": f"rook-{self.out_map['CSI_RBD_NODE_SECRET_NAME']}", 1911 }, 1912 } 1913 ) 1914 1915 # if 'TOPOLOGY_POOLS', 'TOPOLOGY_FAILURE_DOMAIN_LABEL', 'TOPOLOGY_FAILURE_DOMAIN_VALUES' exists, 1916 # then only add 'topology' StorageClass 1917 if ( 1918 self.out_map["TOPOLOGY_POOLS"] 1919 and self.out_map["TOPOLOGY_FAILURE_DOMAIN_LABEL"] 1920 and self.out_map["TOPOLOGY_FAILURE_DOMAIN_VALUES"] 1921 ): 1922 json_out.append( 1923 { 1924 "name": "ceph-rbd-topology", 1925 "kind": "StorageClass", 1926 "data": { 1927 "topologyFailureDomainLabel": self.out_map[ 1928 "TOPOLOGY_FAILURE_DOMAIN_LABEL" 1929 ], 1930 "topologyFailureDomainValues": self.out_map[ 1931 "TOPOLOGY_FAILURE_DOMAIN_VALUES" 1932 ], 1933 "topologyPools": self.out_map["TOPOLOGY_POOLS"], 1934 "csi.storage.k8s.io/provisioner-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}", 1935 "csi.storage.k8s.io/controller-expand-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}", 1936 "csi.storage.k8s.io/node-stage-secret-name": f"rook-{self.out_map['CSI_RBD_NODE_SECRET_NAME']}", 1937 }, 1938 } 1939 ) 1940 1941 # if 'CEPHFS_FS_NAME' exists, then only add 'cephfs' StorageClass 1942 if self.out_map["CEPHFS_FS_NAME"]: 1943 json_out.append( 1944 { 1945 "name": "cephfs", 1946 "kind": "StorageClass", 1947 "data": { 1948 "fsName": self.out_map["CEPHFS_FS_NAME"], 1949 "pool": self.out_map["CEPHFS_POOL_NAME"], 1950 "csi.storage.k8s.io/provisioner-secret-name": f"rook-{self.out_map['CSI_CEPHFS_PROVISIONER_SECRET_NAME']}", 1951 "csi.storage.k8s.io/controller-expand-secret-name": f"rook-{self.out_map['CSI_CEPHFS_PROVISIONER_SECRET_NAME']}", 1952 "csi.storage.k8s.io/node-stage-secret-name": f"rook-{self.out_map['CSI_CEPHFS_NODE_SECRET_NAME']}", 1953 }, 1954 } 1955 ) 1956 # if 'RGW_ENDPOINT' exists, then only add 'ceph-rgw' StorageClass 1957 if self.out_map["RGW_ENDPOINT"]: 1958 json_out.append( 1959 { 1960 "name": "ceph-rgw", 1961 "kind": "StorageClass", 1962 "data": { 1963 "endpoint": self.out_map["RGW_ENDPOINT"], 1964 "poolPrefix": self.out_map["RGW_POOL_PREFIX"], 1965 }, 1966 } 1967 ) 1968 json_out.append( 1969 { 1970 "name": "rgw-admin-ops-user", 1971 "kind": "Secret", 1972 "data": { 1973 "accessKey": self.out_map["RGW_ADMIN_OPS_USER_ACCESS_KEY"], 1974 "secretKey": self.out_map["RGW_ADMIN_OPS_USER_SECRET_KEY"], 1975 }, 1976 } 1977 ) 1978 # if 'RGW_TLS_CERT' exists, then only add the "ceph-rgw-tls-cert" secret 1979 if self.out_map["RGW_TLS_CERT"]: 1980 json_out.append( 1981 { 1982 "name": "ceph-rgw-tls-cert", 1983 "kind": "Secret", 1984 "data": { 1985 "cert": self.out_map["RGW_TLS_CERT"], 1986 }, 1987 } 1988 ) 1989 1990 return json.dumps(json_out) + LINESEP 1991 1992 def upgrade_users_permissions(self): 1993 users = [ 1994 "client.csi-cephfs-node", 1995 "client.csi-cephfs-provisioner", 1996 "client.csi-rbd-node", 1997 "client.csi-rbd-provisioner", 1998 "client.healthchecker", 1999 ] 2000 if self.run_as_user != "" and self.run_as_user not in users: 2001 users.append(self.run_as_user) 2002 for user in users: 2003 self.upgrade_user_permissions(user) 2004 2005 def get_rgw_pool_name_during_upgrade(self, user, caps): 2006 if user == "client.healthchecker": 2007 # when admin has not provided rgw pool name during upgrade, 2008 # get the rgw pool name from client.healthchecker user which was used during connection 2009 if not self._arg_parser.rgw_pool_prefix: 2010 # To get value 'default' which is rgw pool name from 'allow rwx pool=default.rgw.meta' 2011 pattern = r"pool=(.*?)\.rgw\.meta" 2012 match = re.search(pattern, caps) 2013 if match: 2014 self._arg_parser.rgw_pool_prefix = match.group(1) 2015 else: 2016 raise ExecutionFailureException( 2017 "failed to get rgw pool name for upgrade" 2018 ) 2019 2020 def upgrade_user_permissions(self, user): 2021 # check whether the given user exists or not 2022 cmd_json = {"prefix": "auth get", "entity": f"{user}", "format": "json"} 2023 ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) 2024 if ret_val != 0 or len(json_out) == 0: 2025 print(f"user {user} not found for upgrading.") 2026 return 2027 existing_caps = json_out[0]["caps"] 2028 self.get_rgw_pool_name_during_upgrade(user, str(existing_caps)) 2029 new_cap, _ = self.get_caps_and_entity(user) 2030 cap_keys = ["mon", "mgr", "osd", "mds"] 2031 caps = [] 2032 for eachCap in cap_keys: 2033 cur_cap_values = existing_caps.get(eachCap, "") 2034 new_cap_values = new_cap.get(eachCap, "") 2035 cur_cap_perm_list = [ 2036 x.strip() for x in cur_cap_values.split(",") if x.strip() 2037 ] 2038 new_cap_perm_list = [ 2039 x.strip() for x in new_cap_values.split(",") if x.strip() 2040 ] 2041 # append new_cap_list to cur_cap_list to maintain the order of caps 2042 cur_cap_perm_list.extend(new_cap_perm_list) 2043 # eliminate duplicates without using 'set' 2044 # set re-orders items in the list and we have to keep the order 2045 new_cap_list = [] 2046 [new_cap_list.append(x) for x in cur_cap_perm_list if x not in new_cap_list] 2047 existing_caps[eachCap] = ", ".join(new_cap_list) 2048 if existing_caps[eachCap]: 2049 caps.append(eachCap) 2050 caps.append(existing_caps[eachCap]) 2051 cmd_json = { 2052 "prefix": "auth caps", 2053 "entity": user, 2054 "caps": caps, 2055 "format": "json", 2056 } 2057 ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) 2058 if ret_val != 0: 2059 raise ExecutionFailureException( 2060 f"'auth caps {user}' command failed.\n Error: {err_msg}" 2061 ) 2062 print(f"Updated user {user} successfully.") 2063 2064 def main(self): 2065 generated_output = "" 2066 if self._arg_parser.upgrade: 2067 self.upgrade_users_permissions() 2068 elif self._arg_parser.format == "json": 2069 generated_output = self.gen_json_out() 2070 elif self._arg_parser.format == "bash": 2071 generated_output = self.gen_shell_out() 2072 else: 2073 raise ExecutionFailureException( 2074 f"Unsupported format: {self._arg_parser.format}" 2075 ) 2076 print(generated_output) 2077 if self.output_file and generated_output: 2078 fOut = open(self.output_file, mode="w", encoding="UTF-8") 2079 fOut.write(generated_output) 2080 fOut.close() 2081 2082 2083################################################ 2084##################### MAIN ##################### 2085################################################ 2086if __name__ == "__main__": 2087 rjObj = RadosJSON() 2088 try: 2089 rjObj.main() 2090 except ExecutionFailureException as err: 2091 print(f"Execution Failed: {err}") 2092 raise err 2093 except KeyError as kErr: 2094 print(f"KeyError: {kErr}") 2095 except OSError as osErr: 2096 print(f"Error while trying to output the data: {osErr}") 2097 finally: 2098 rjObj.shutdown()