Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'ceph-for-6.10-rc8' of https://github.com/ceph/ceph-client

Pull ceph fixes from Ilya Dryomov:
"A fix for a possible use-after-free following "rbd unmap" or "umount"
marked for stable and two kernel-doc fixups"

* tag 'ceph-for-6.10-rc8' of https://github.com/ceph/ceph-client:
libceph: fix crush_choose_firstn() kernel-doc warnings
libceph: suppress crush_choose_indep() kernel-doc warnings
libceph: fix race between delayed_work() and ceph_monc_stop()

+17 -4
+5 -2
net/ceph/crush/mapper.c
@@ -429,7 +429,10 @@
 /**
  * crush_choose_firstn - choose numrep distinct items of given type
  * @map: the crush_map
+ * @work: working space initialized by crush_init_workspace()
  * @bucket: the bucket we are choose an item from
+ * @weight: weight vector (for map leaves)
+ * @weight_max: size of weight vector
  * @x: crush input value
  * @numrep: the number of items to choose
  * @type: the type of item to choose
@@ -448,6 +445,7 @@
  * @vary_r: pass r to recursive calls
  * @out2: second output vector for leaf items (if @recurse_to_leaf)
  * @parent_r: r value passed from the parent
+ * @choose_args: weights and ids for each known bucket
  */
 static int crush_choose_firstn(const struct crush_map *map,
 			       struct crush_work *work,
@@ -640,9 +636,8 @@
 }
 
 
-/**
+/*
  * crush_choose_indep: alternative breadth-first positionally stable mapping
- *
  */
 static void crush_choose_indep(const struct crush_map *map,
 			       struct crush_work *work,
+12 -2
net/ceph/mon_client.c
@@ -1085,13 +1085,20 @@
 	struct ceph_mon_client *monc =
 		container_of(work, struct ceph_mon_client, delayed_work.work);
 
-	dout("monc delayed_work\n");
 	mutex_lock(&monc->mutex);
+	dout("%s mon%d\n", __func__, monc->cur_mon);
+	if (monc->cur_mon < 0) {
+		goto out;
+	}
+
 	if (monc->hunting) {
 		dout("%s continuing hunt\n", __func__);
 		reopen_session(monc);
 	} else {
 		int is_auth = ceph_auth_is_authenticated(monc->auth);
+
+		dout("%s is_authed %d\n", __func__, is_auth);
 		if (ceph_con_keepalive_expired(&monc->con,
 					       CEPH_MONC_PING_TIMEOUT)) {
 			dout("monc keepalive timeout\n");
@@ -1122,6 +1116,8 @@
 		}
 	}
 	__schedule_delayed(monc);
+
+out:
 	mutex_unlock(&monc->mutex);
 }
 
@@ -1240,12 +1232,14 @@
 void ceph_monc_stop(struct ceph_mon_client *monc)
 {
 	dout("stop\n");
-	cancel_delayed_work_sync(&monc->delayed_work);
 
 	mutex_lock(&monc->mutex);
 	__close_session(monc);
+	monc->hunting = false;
 	monc->cur_mon = -1;
 	mutex_unlock(&monc->mutex);
+
+	cancel_delayed_work_sync(&monc->delayed_work);
 
 	/*
 	 * flush msgr queue before we destroy ourselves to ensure that: