Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drbd: use genl pre_doit/post_doit

Every doit handler followed the same pattern: it stack-allocated an
adm_ctx, called drbd_adm_prepare() at the top, and called
drbd_adm_finish() at the bottom. This boilerplate was duplicated
across 25 handlers and made error paths inconsistent, since some
handlers could miss sending the reply skb on early-exit paths.

The generic netlink framework already provides pre_doit/post_doit
hooks for exactly this purpose. An old comment even noted "this
would be a good candidate for a pre_doit hook".

Use them:

- pre_doit heap-allocates adm_ctx, looks up per-command flags from a
new drbd_genl_cmd_flags[] table, runs drbd_adm_prepare(), and
stores the context in info->user_ptr[0].
- post_doit sends the reply, drops kref references for
device/connection/resource, and frees the adm_ctx.
- Handlers just receive adm_ctx from info->user_ptr[0], set
reply_dh->ret_code, and return. All teardown is in post_doit.
- drbd_adm_finish() is removed, superseded by post_doit.

Signed-off-by: Christoph Böhmwalder <christoph.boehmwalder@linbit.com>
Link: https://patch.msgid.link/20260324152907.2840984-1-christoph.boehmwalder@linbit.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Christoph Böhmwalder and committed by
Jens Axboe
630bbba4 829def1e

+324 -253
+320 -253
drivers/block/drbd/drbd_nl.c
··· 75 75 76 76 #include <linux/drbd_genl_api.h> 77 77 #include "drbd_nla.h" 78 + 79 + static int drbd_pre_doit(const struct genl_split_ops *ops, 80 + struct sk_buff *skb, struct genl_info *info); 81 + static void drbd_post_doit(const struct genl_split_ops *ops, 82 + struct sk_buff *skb, struct genl_info *info); 83 + 84 + #define GENL_MAGIC_FAMILY_PRE_DOIT drbd_pre_doit 85 + #define GENL_MAGIC_FAMILY_POST_DOIT drbd_post_doit 86 + 78 87 #include <linux/genl_magic_func.h> 79 88 80 89 static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */ ··· 153 144 return 0; 154 145 } 155 146 156 - /* This would be a good candidate for a "pre_doit" hook, 157 - * and per-family private info->pointers. 158 - * But we need to stay compatible with older kernels. 159 - * If it returns successfully, adm_ctx members are valid. 160 - * 147 + /* Flags for drbd_adm_prepare() */ 148 + #define DRBD_ADM_NEED_MINOR (1 << 0) 149 + #define DRBD_ADM_NEED_RESOURCE (1 << 1) 150 + #define DRBD_ADM_NEED_CONNECTION (1 << 2) 151 + 152 + /* Per-command flags for drbd_pre_doit() */ 153 + static const unsigned int drbd_genl_cmd_flags[] = { 154 + [DRBD_ADM_GET_STATUS] = DRBD_ADM_NEED_MINOR, 155 + [DRBD_ADM_NEW_MINOR] = DRBD_ADM_NEED_RESOURCE, 156 + [DRBD_ADM_DEL_MINOR] = DRBD_ADM_NEED_MINOR, 157 + [DRBD_ADM_NEW_RESOURCE] = 0, 158 + [DRBD_ADM_DEL_RESOURCE] = DRBD_ADM_NEED_RESOURCE, 159 + [DRBD_ADM_RESOURCE_OPTS] = DRBD_ADM_NEED_RESOURCE, 160 + [DRBD_ADM_CONNECT] = DRBD_ADM_NEED_RESOURCE, 161 + [DRBD_ADM_CHG_NET_OPTS] = DRBD_ADM_NEED_CONNECTION, 162 + [DRBD_ADM_DISCONNECT] = DRBD_ADM_NEED_CONNECTION, 163 + [DRBD_ADM_ATTACH] = DRBD_ADM_NEED_MINOR, 164 + [DRBD_ADM_CHG_DISK_OPTS] = DRBD_ADM_NEED_MINOR, 165 + [DRBD_ADM_RESIZE] = DRBD_ADM_NEED_MINOR, 166 + [DRBD_ADM_PRIMARY] = DRBD_ADM_NEED_MINOR, 167 + [DRBD_ADM_SECONDARY] = DRBD_ADM_NEED_MINOR, 168 + [DRBD_ADM_NEW_C_UUID] = DRBD_ADM_NEED_MINOR, 169 + [DRBD_ADM_START_OV] = DRBD_ADM_NEED_MINOR, 170 + [DRBD_ADM_DETACH] = DRBD_ADM_NEED_MINOR, 171 + 
[DRBD_ADM_INVALIDATE] = DRBD_ADM_NEED_MINOR, 172 + [DRBD_ADM_INVAL_PEER] = DRBD_ADM_NEED_MINOR, 173 + [DRBD_ADM_PAUSE_SYNC] = DRBD_ADM_NEED_MINOR, 174 + [DRBD_ADM_RESUME_SYNC] = DRBD_ADM_NEED_MINOR, 175 + [DRBD_ADM_SUSPEND_IO] = DRBD_ADM_NEED_MINOR, 176 + [DRBD_ADM_RESUME_IO] = DRBD_ADM_NEED_MINOR, 177 + [DRBD_ADM_OUTDATE] = DRBD_ADM_NEED_MINOR, 178 + [DRBD_ADM_GET_TIMEOUT_TYPE] = DRBD_ADM_NEED_MINOR, 179 + [DRBD_ADM_DOWN] = DRBD_ADM_NEED_RESOURCE, 180 + }; 181 + 182 + /* 161 183 * At this point, we still rely on the global genl_lock(). 162 184 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need 163 185 * to add additional synchronization against object destruction/modification. 164 186 */ 165 - #define DRBD_ADM_NEED_MINOR 1 166 - #define DRBD_ADM_NEED_RESOURCE 2 167 - #define DRBD_ADM_NEED_CONNECTION 4 168 187 static int drbd_adm_prepare(struct drbd_config_context *adm_ctx, 169 188 struct sk_buff *skb, struct genl_info *info, unsigned flags) 170 189 { 171 190 struct drbd_genlmsghdr *d_in = genl_info_userhdr(info); 172 191 const u8 cmd = info->genlhdr->cmd; 173 192 int err; 174 - 175 - memset(adm_ctx, 0, sizeof(*adm_ctx)); 176 193 177 194 /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */ 178 195 if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN)) ··· 335 300 return err; 336 301 } 337 302 338 - static int drbd_adm_finish(struct drbd_config_context *adm_ctx, 339 - struct genl_info *info, int retcode) 303 + static int drbd_pre_doit(const struct genl_split_ops *ops, 304 + struct sk_buff *skb, struct genl_info *info) 340 305 { 306 + struct drbd_config_context *adm_ctx; 307 + u8 cmd = info->genlhdr->cmd; 308 + unsigned int flags; 309 + int err; 310 + 311 + adm_ctx = kzalloc_obj(*adm_ctx); 312 + if (!adm_ctx) 313 + return -ENOMEM; 314 + 315 + flags = (cmd < ARRAY_SIZE(drbd_genl_cmd_flags)) 316 + ? 
drbd_genl_cmd_flags[cmd] : 0; 317 + 318 + err = drbd_adm_prepare(adm_ctx, skb, info, flags); 319 + if (err && !adm_ctx->reply_skb) { 320 + /* Fatal error before reply_skb was allocated. */ 321 + kfree(adm_ctx); 322 + return err; 323 + } 324 + if (err) 325 + adm_ctx->reply_dh->ret_code = err; 326 + 327 + info->user_ptr[0] = adm_ctx; 328 + return 0; 329 + } 330 + 331 + static void drbd_post_doit(const struct genl_split_ops *ops, 332 + struct sk_buff *skb, struct genl_info *info) 333 + { 334 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 335 + 336 + if (!adm_ctx) 337 + return; 338 + 339 + if (adm_ctx->reply_skb) 340 + drbd_adm_send_reply(adm_ctx->reply_skb, info); 341 + 341 342 if (adm_ctx->device) { 342 343 kref_put(&adm_ctx->device->kref, drbd_destroy_device); 343 344 adm_ctx->device = NULL; ··· 387 316 adm_ctx->resource = NULL; 388 317 } 389 318 390 - if (!adm_ctx->reply_skb) 391 - return -ENOMEM; 392 - 393 - adm_ctx->reply_dh->ret_code = retcode; 394 - drbd_adm_send_reply(adm_ctx->reply_skb, info); 395 - return 0; 319 + kfree(adm_ctx); 396 320 } 397 321 398 322 static void setup_khelper_env(struct drbd_connection *connection, char **envp) ··· 832 766 833 767 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info) 834 768 { 835 - struct drbd_config_context adm_ctx; 769 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 836 770 struct set_role_parms parms; 837 771 int err; 838 772 enum drbd_ret_code retcode; 839 773 enum drbd_state_rv rv; 840 774 841 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 842 - if (!adm_ctx.reply_skb) 843 - return retcode; 775 + if (!adm_ctx->reply_skb) 776 + return 0; 777 + retcode = adm_ctx->reply_dh->ret_code; 844 778 if (retcode != NO_ERROR) 845 779 goto out; 846 780 ··· 849 783 err = set_role_parms_from_attrs(&parms, info); 850 784 if (err) { 851 785 retcode = ERR_MANDATORY_TAG; 852 - drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 786 + 
drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err)); 853 787 goto out; 854 788 } 855 789 } 856 790 genl_unlock(); 857 - mutex_lock(&adm_ctx.resource->adm_mutex); 791 + mutex_lock(&adm_ctx->resource->adm_mutex); 858 792 859 793 if (info->genlhdr->cmd == DRBD_ADM_PRIMARY) 860 - rv = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate); 794 + rv = drbd_set_role(adm_ctx->device, R_PRIMARY, parms.assume_uptodate); 861 795 else 862 - rv = drbd_set_role(adm_ctx.device, R_SECONDARY, 0); 796 + rv = drbd_set_role(adm_ctx->device, R_SECONDARY, 0); 863 797 864 - mutex_unlock(&adm_ctx.resource->adm_mutex); 798 + mutex_unlock(&adm_ctx->resource->adm_mutex); 865 799 genl_lock(); 866 - drbd_adm_finish(&adm_ctx, info, rv); 800 + adm_ctx->reply_dh->ret_code = rv; 867 801 return 0; 868 802 out: 869 - drbd_adm_finish(&adm_ctx, info, retcode); 803 + adm_ctx->reply_dh->ret_code = retcode; 870 804 return 0; 871 805 } 872 806 ··· 1578 1512 1579 1513 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) 1580 1514 { 1581 - struct drbd_config_context adm_ctx; 1515 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 1582 1516 enum drbd_ret_code retcode; 1583 1517 struct drbd_device *device; 1584 1518 struct disk_conf *new_disk_conf, *old_disk_conf; ··· 1586 1520 int err; 1587 1521 unsigned int fifo_size; 1588 1522 1589 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 1590 - if (!adm_ctx.reply_skb) 1591 - return retcode; 1523 + if (!adm_ctx->reply_skb) 1524 + return 0; 1525 + retcode = adm_ctx->reply_dh->ret_code; 1592 1526 if (retcode != NO_ERROR) 1593 1527 goto finish; 1594 1528 1595 - device = adm_ctx.device; 1596 - mutex_lock(&adm_ctx.resource->adm_mutex); 1529 + device = adm_ctx->device; 1530 + mutex_lock(&adm_ctx->resource->adm_mutex); 1597 1531 1598 1532 /* we also need a disk 1599 1533 * to change the options on */ ··· 1617 1551 err = disk_conf_from_attrs_for_change(new_disk_conf, info); 1618 1552 if (err && err != 
-ENOMSG) { 1619 1553 retcode = ERR_MANDATORY_TAG; 1620 - drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 1554 + drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err)); 1621 1555 goto fail_unlock; 1622 1556 } 1623 1557 ··· 1643 1577 if (err) { 1644 1578 /* Could be just "busy". Ignore? 1645 1579 * Introduce dedicated error code? */ 1646 - drbd_msg_put_info(adm_ctx.reply_skb, 1580 + drbd_msg_put_info(adm_ctx->reply_skb, 1647 1581 "Try again without changing current al-extents setting"); 1648 1582 retcode = ERR_NOMEM; 1649 1583 goto fail_unlock; ··· 1706 1640 success: 1707 1641 put_ldev(device); 1708 1642 out: 1709 - mutex_unlock(&adm_ctx.resource->adm_mutex); 1643 + mutex_unlock(&adm_ctx->resource->adm_mutex); 1710 1644 finish: 1711 - drbd_adm_finish(&adm_ctx, info, retcode); 1645 + adm_ctx->reply_dh->ret_code = retcode; 1712 1646 return 0; 1713 1647 } 1714 1648 ··· 1800 1734 1801 1735 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) 1802 1736 { 1803 - struct drbd_config_context adm_ctx; 1737 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 1804 1738 struct drbd_device *device; 1805 1739 struct drbd_peer_device *peer_device; 1806 1740 struct drbd_connection *connection; ··· 1817 1751 enum drbd_state_rv rv; 1818 1752 struct net_conf *nc; 1819 1753 1820 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 1821 - if (!adm_ctx.reply_skb) 1822 - return retcode; 1754 + if (!adm_ctx->reply_skb) 1755 + return 0; 1756 + retcode = adm_ctx->reply_dh->ret_code; 1823 1757 if (retcode != NO_ERROR) 1824 1758 goto finish; 1825 1759 1826 - device = adm_ctx.device; 1827 - mutex_lock(&adm_ctx.resource->adm_mutex); 1760 + device = adm_ctx->device; 1761 + mutex_lock(&adm_ctx->resource->adm_mutex); 1828 1762 peer_device = first_peer_device(device); 1829 1763 connection = peer_device->connection; 1830 1764 conn_reconfig_start(connection); ··· 1869 1803 err = disk_conf_from_attrs(new_disk_conf, info); 1870 1804 if 
(err) { 1871 1805 retcode = ERR_MANDATORY_TAG; 1872 - drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 1806 + drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err)); 1873 1807 goto fail; 1874 1808 } 1875 1809 ··· 2020 1954 drbd_warn(device, "truncating a consistent device during attach (%llu < %llu)\n", nsz, eff); 2021 1955 } else { 2022 1956 drbd_warn(device, "refusing to truncate a consistent device (%llu < %llu)\n", nsz, eff); 2023 - drbd_msg_sprintf_info(adm_ctx.reply_skb, 1957 + drbd_msg_sprintf_info(adm_ctx->reply_skb, 2024 1958 "To-be-attached device has last effective > current size, and is consistent\n" 2025 1959 "(%llu > %llu sectors). Refusing to attach.", eff, nsz); 2026 1960 retcode = ERR_IMPLICIT_SHRINK; ··· 2196 2130 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE); 2197 2131 put_ldev(device); 2198 2132 conn_reconfig_done(connection); 2199 - mutex_unlock(&adm_ctx.resource->adm_mutex); 2200 - drbd_adm_finish(&adm_ctx, info, retcode); 2133 + mutex_unlock(&adm_ctx->resource->adm_mutex); 2134 + adm_ctx->reply_dh->ret_code = retcode; 2201 2135 return 0; 2202 2136 2203 2137 force_diskless_dec: ··· 2216 2150 kfree(new_disk_conf); 2217 2151 lc_destroy(resync_lru); 2218 2152 kfree(new_plan); 2219 - mutex_unlock(&adm_ctx.resource->adm_mutex); 2153 + mutex_unlock(&adm_ctx->resource->adm_mutex); 2220 2154 finish: 2221 - drbd_adm_finish(&adm_ctx, info, retcode); 2155 + adm_ctx->reply_dh->ret_code = retcode; 2222 2156 return 0; 2223 2157 } 2224 2158 ··· 2240 2174 * Only then we have finally detached. 
*/ 2241 2175 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info) 2242 2176 { 2243 - struct drbd_config_context adm_ctx; 2177 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 2244 2178 enum drbd_ret_code retcode; 2245 2179 struct detach_parms parms = { }; 2246 2180 int err; 2247 2181 2248 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 2249 - if (!adm_ctx.reply_skb) 2250 - return retcode; 2182 + if (!adm_ctx->reply_skb) 2183 + return 0; 2184 + retcode = adm_ctx->reply_dh->ret_code; 2251 2185 if (retcode != NO_ERROR) 2252 2186 goto out; 2253 2187 ··· 2255 2189 err = detach_parms_from_attrs(&parms, info); 2256 2190 if (err) { 2257 2191 retcode = ERR_MANDATORY_TAG; 2258 - drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 2192 + drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err)); 2259 2193 goto out; 2260 2194 } 2261 2195 } 2262 2196 2263 - mutex_lock(&adm_ctx.resource->adm_mutex); 2264 - retcode = adm_detach(adm_ctx.device, parms.force_detach); 2265 - mutex_unlock(&adm_ctx.resource->adm_mutex); 2197 + mutex_lock(&adm_ctx->resource->adm_mutex); 2198 + retcode = adm_detach(adm_ctx->device, parms.force_detach); 2199 + mutex_unlock(&adm_ctx->resource->adm_mutex); 2266 2200 out: 2267 - drbd_adm_finish(&adm_ctx, info, retcode); 2201 + adm_ctx->reply_dh->ret_code = retcode; 2268 2202 return 0; 2269 2203 } 2270 2204 ··· 2438 2372 2439 2373 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) 2440 2374 { 2441 - struct drbd_config_context adm_ctx; 2375 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 2442 2376 enum drbd_ret_code retcode; 2443 2377 struct drbd_connection *connection; 2444 2378 struct net_conf *old_net_conf, *new_net_conf = NULL; ··· 2447 2381 int rsr; /* re-sync running */ 2448 2382 struct crypto crypto = { }; 2449 2383 2450 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION); 2451 - if (!adm_ctx.reply_skb) 2452 - return retcode; 2384 + if 
(!adm_ctx->reply_skb) 2385 + return 0; 2386 + retcode = adm_ctx->reply_dh->ret_code; 2453 2387 if (retcode != NO_ERROR) 2454 2388 goto finish; 2455 2389 2456 - connection = adm_ctx.connection; 2457 - mutex_lock(&adm_ctx.resource->adm_mutex); 2390 + connection = adm_ctx->connection; 2391 + mutex_lock(&adm_ctx->resource->adm_mutex); 2458 2392 2459 2393 new_net_conf = kzalloc_obj(struct net_conf); 2460 2394 if (!new_net_conf) { ··· 2469 2403 old_net_conf = connection->net_conf; 2470 2404 2471 2405 if (!old_net_conf) { 2472 - drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect"); 2406 + drbd_msg_put_info(adm_ctx->reply_skb, "net conf missing, try connect"); 2473 2407 retcode = ERR_INVALID_REQUEST; 2474 2408 goto fail; 2475 2409 } ··· 2481 2415 err = net_conf_from_attrs_for_change(new_net_conf, info); 2482 2416 if (err && err != -ENOMSG) { 2483 2417 retcode = ERR_MANDATORY_TAG; 2484 - drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 2418 + drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err)); 2485 2419 goto fail; 2486 2420 } 2487 2421 ··· 2551 2485 done: 2552 2486 conn_reconfig_done(connection); 2553 2487 out: 2554 - mutex_unlock(&adm_ctx.resource->adm_mutex); 2488 + mutex_unlock(&adm_ctx->resource->adm_mutex); 2555 2489 finish: 2556 - drbd_adm_finish(&adm_ctx, info, retcode); 2490 + adm_ctx->reply_dh->ret_code = retcode; 2557 2491 return 0; 2558 2492 } 2559 2493 ··· 2582 2516 struct connection_info connection_info; 2583 2517 enum drbd_notification_type flags; 2584 2518 unsigned int peer_devices = 0; 2585 - struct drbd_config_context adm_ctx; 2519 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 2586 2520 struct drbd_peer_device *peer_device; 2587 2521 struct net_conf *old_net_conf, *new_net_conf = NULL; 2588 2522 struct crypto crypto = { }; ··· 2593 2527 int i; 2594 2528 int err; 2595 2529 2596 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); 2597 - 2598 - if (!adm_ctx.reply_skb) 2599 - 
return retcode; 2530 + if (!adm_ctx->reply_skb) 2531 + return 0; 2532 + retcode = adm_ctx->reply_dh->ret_code; 2600 2533 if (retcode != NO_ERROR) 2601 2534 goto out; 2602 - if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) { 2603 - drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing"); 2535 + if (!(adm_ctx->my_addr && adm_ctx->peer_addr)) { 2536 + drbd_msg_put_info(adm_ctx->reply_skb, "connection endpoint(s) missing"); 2604 2537 retcode = ERR_INVALID_REQUEST; 2605 2538 goto out; 2606 2539 } ··· 2609 2544 * concurrent reconfiguration/addition/deletion */ 2610 2545 for_each_resource(resource, &drbd_resources) { 2611 2546 for_each_connection(connection, resource) { 2612 - if (nla_len(adm_ctx.my_addr) == connection->my_addr_len && 2613 - !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr, 2547 + if (nla_len(adm_ctx->my_addr) == connection->my_addr_len && 2548 + !memcmp(nla_data(adm_ctx->my_addr), &connection->my_addr, 2614 2549 connection->my_addr_len)) { 2615 2550 retcode = ERR_LOCAL_ADDR; 2616 2551 goto out; 2617 2552 } 2618 2553 2619 - if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len && 2620 - !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr, 2554 + if (nla_len(adm_ctx->peer_addr) == connection->peer_addr_len && 2555 + !memcmp(nla_data(adm_ctx->peer_addr), &connection->peer_addr, 2621 2556 connection->peer_addr_len)) { 2622 2557 retcode = ERR_PEER_ADDR; 2623 2558 goto out; ··· 2625 2560 } 2626 2561 } 2627 2562 2628 - mutex_lock(&adm_ctx.resource->adm_mutex); 2629 - connection = first_connection(adm_ctx.resource); 2563 + mutex_lock(&adm_ctx->resource->adm_mutex); 2564 + connection = first_connection(adm_ctx->resource); 2630 2565 conn_reconfig_start(connection); 2631 2566 2632 2567 if (connection->cstate > C_STANDALONE) { ··· 2646 2581 err = net_conf_from_attrs(new_net_conf, info); 2647 2582 if (err && err != -ENOMSG) { 2648 2583 retcode = ERR_MANDATORY_TAG; 2649 - drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 
2584 + drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err)); 2650 2585 goto fail; 2651 2586 } 2652 2587 ··· 2662 2597 2663 2598 drbd_flush_workqueue(&connection->sender_work); 2664 2599 2665 - mutex_lock(&adm_ctx.resource->conf_update); 2600 + mutex_lock(&adm_ctx->resource->conf_update); 2666 2601 old_net_conf = connection->net_conf; 2667 2602 if (old_net_conf) { 2668 2603 retcode = ERR_NET_CONFIGURED; 2669 - mutex_unlock(&adm_ctx.resource->conf_update); 2604 + mutex_unlock(&adm_ctx->resource->conf_update); 2670 2605 goto fail; 2671 2606 } 2672 2607 rcu_assign_pointer(connection->net_conf, new_net_conf); ··· 2677 2612 connection->csums_tfm = crypto.csums_tfm; 2678 2613 connection->verify_tfm = crypto.verify_tfm; 2679 2614 2680 - connection->my_addr_len = nla_len(adm_ctx.my_addr); 2681 - memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len); 2682 - connection->peer_addr_len = nla_len(adm_ctx.peer_addr); 2683 - memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len); 2615 + connection->my_addr_len = nla_len(adm_ctx->my_addr); 2616 + memcpy(&connection->my_addr, nla_data(adm_ctx->my_addr), connection->my_addr_len); 2617 + connection->peer_addr_len = nla_len(adm_ctx->peer_addr); 2618 + memcpy(&connection->peer_addr, nla_data(adm_ctx->peer_addr), connection->peer_addr_len); 2684 2619 2685 2620 idr_for_each_entry(&connection->peer_devices, peer_device, i) { 2686 2621 peer_devices++; ··· 2698 2633 notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags); 2699 2634 } 2700 2635 mutex_unlock(&notification_mutex); 2701 - mutex_unlock(&adm_ctx.resource->conf_update); 2636 + mutex_unlock(&adm_ctx->resource->conf_update); 2702 2637 2703 2638 rcu_read_lock(); 2704 2639 idr_for_each_entry(&connection->peer_devices, peer_device, i) { ··· 2711 2646 rv = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE); 2712 2647 2713 2648 conn_reconfig_done(connection); 2714 - 
mutex_unlock(&adm_ctx.resource->adm_mutex); 2715 - drbd_adm_finish(&adm_ctx, info, rv); 2649 + mutex_unlock(&adm_ctx->resource->adm_mutex); 2650 + adm_ctx->reply_dh->ret_code = rv; 2716 2651 return 0; 2717 2652 2718 2653 fail: ··· 2720 2655 kfree(new_net_conf); 2721 2656 2722 2657 conn_reconfig_done(connection); 2723 - mutex_unlock(&adm_ctx.resource->adm_mutex); 2658 + mutex_unlock(&adm_ctx->resource->adm_mutex); 2724 2659 out: 2725 - drbd_adm_finish(&adm_ctx, info, retcode); 2660 + adm_ctx->reply_dh->ret_code = retcode; 2726 2661 return 0; 2727 2662 } 2728 2663 ··· 2794 2729 2795 2730 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info) 2796 2731 { 2797 - struct drbd_config_context adm_ctx; 2732 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 2798 2733 struct disconnect_parms parms; 2799 2734 struct drbd_connection *connection; 2800 2735 enum drbd_state_rv rv; 2801 2736 enum drbd_ret_code retcode; 2802 2737 int err; 2803 2738 2804 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION); 2805 - if (!adm_ctx.reply_skb) 2806 - return retcode; 2739 + if (!adm_ctx->reply_skb) 2740 + return 0; 2741 + retcode = adm_ctx->reply_dh->ret_code; 2807 2742 if (retcode != NO_ERROR) 2808 2743 goto fail; 2809 2744 2810 - connection = adm_ctx.connection; 2745 + connection = adm_ctx->connection; 2811 2746 memset(&parms, 0, sizeof(parms)); 2812 2747 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) { 2813 2748 err = disconnect_parms_from_attrs(&parms, info); 2814 2749 if (err) { 2815 2750 retcode = ERR_MANDATORY_TAG; 2816 - drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 2751 + drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err)); 2817 2752 goto fail; 2818 2753 } 2819 2754 } 2820 2755 2821 - mutex_lock(&adm_ctx.resource->adm_mutex); 2756 + mutex_lock(&adm_ctx->resource->adm_mutex); 2822 2757 rv = conn_try_disconnect(connection, parms.force_disconnect); 2823 - mutex_unlock(&adm_ctx.resource->adm_mutex); 2758 + 
mutex_unlock(&adm_ctx->resource->adm_mutex); 2824 2759 if (rv < SS_SUCCESS) { 2825 - drbd_adm_finish(&adm_ctx, info, rv); 2760 + adm_ctx->reply_dh->ret_code = rv; 2826 2761 return 0; 2827 2762 } 2828 2763 retcode = NO_ERROR; 2829 2764 fail: 2830 - drbd_adm_finish(&adm_ctx, info, retcode); 2765 + adm_ctx->reply_dh->ret_code = retcode; 2831 2766 return 0; 2832 2767 } 2833 2768 ··· 2849 2784 2850 2785 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) 2851 2786 { 2852 - struct drbd_config_context adm_ctx; 2787 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 2853 2788 struct disk_conf *old_disk_conf, *new_disk_conf = NULL; 2854 2789 struct resize_parms rs; 2855 2790 struct drbd_device *device; ··· 2860 2795 sector_t u_size; 2861 2796 int err; 2862 2797 2863 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 2864 - if (!adm_ctx.reply_skb) 2865 - return retcode; 2798 + if (!adm_ctx->reply_skb) 2799 + return 0; 2800 + retcode = adm_ctx->reply_dh->ret_code; 2866 2801 if (retcode != NO_ERROR) 2867 2802 goto finish; 2868 2803 2869 - mutex_lock(&adm_ctx.resource->adm_mutex); 2870 - device = adm_ctx.device; 2804 + mutex_lock(&adm_ctx->resource->adm_mutex); 2805 + device = adm_ctx->device; 2871 2806 if (!get_ldev(device)) { 2872 2807 retcode = ERR_NO_DISK; 2873 2808 goto fail; ··· 2880 2815 err = resize_parms_from_attrs(&rs, info); 2881 2816 if (err) { 2882 2817 retcode = ERR_MANDATORY_TAG; 2883 - drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 2818 + drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err)); 2884 2819 goto fail_ldev; 2885 2820 } 2886 2821 } ··· 2972 2907 } 2973 2908 2974 2909 fail: 2975 - mutex_unlock(&adm_ctx.resource->adm_mutex); 2910 + mutex_unlock(&adm_ctx->resource->adm_mutex); 2976 2911 finish: 2977 - drbd_adm_finish(&adm_ctx, info, retcode); 2912 + adm_ctx->reply_dh->ret_code = retcode; 2978 2913 return 0; 2979 2914 2980 2915 fail_ldev: ··· 2985 2920 2986 2921 int 
drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info) 2987 2922 { 2988 - struct drbd_config_context adm_ctx; 2923 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 2989 2924 enum drbd_ret_code retcode; 2990 2925 struct res_opts res_opts; 2991 2926 int err; 2992 2927 2993 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); 2994 - if (!adm_ctx.reply_skb) 2995 - return retcode; 2928 + if (!adm_ctx->reply_skb) 2929 + return 0; 2930 + retcode = adm_ctx->reply_dh->ret_code; 2996 2931 if (retcode != NO_ERROR) 2997 2932 goto fail; 2998 2933 2999 - res_opts = adm_ctx.resource->res_opts; 2934 + res_opts = adm_ctx->resource->res_opts; 3000 2935 if (should_set_defaults(info)) 3001 2936 set_res_opts_defaults(&res_opts); 3002 2937 3003 2938 err = res_opts_from_attrs(&res_opts, info); 3004 2939 if (err && err != -ENOMSG) { 3005 2940 retcode = ERR_MANDATORY_TAG; 3006 - drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 2941 + drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err)); 3007 2942 goto fail; 3008 2943 } 3009 2944 3010 - mutex_lock(&adm_ctx.resource->adm_mutex); 3011 - err = set_resource_options(adm_ctx.resource, &res_opts); 2945 + mutex_lock(&adm_ctx->resource->adm_mutex); 2946 + err = set_resource_options(adm_ctx->resource, &res_opts); 3012 2947 if (err) { 3013 2948 retcode = ERR_INVALID_REQUEST; 3014 2949 if (err == -ENOMEM) 3015 2950 retcode = ERR_NOMEM; 3016 2951 } 3017 - mutex_unlock(&adm_ctx.resource->adm_mutex); 2952 + mutex_unlock(&adm_ctx->resource->adm_mutex); 3018 2953 3019 2954 fail: 3020 - drbd_adm_finish(&adm_ctx, info, retcode); 2955 + adm_ctx->reply_dh->ret_code = retcode; 3021 2956 return 0; 3022 2957 } 3023 2958 3024 2959 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info) 3025 2960 { 3026 - struct drbd_config_context adm_ctx; 2961 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 3027 2962 struct drbd_device *device; 3028 2963 int retcode; /* enum 
drbd_ret_code rsp. enum drbd_state_rv */ 3029 2964 3030 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 3031 - if (!adm_ctx.reply_skb) 3032 - return retcode; 2965 + if (!adm_ctx->reply_skb) 2966 + return 0; 2967 + retcode = adm_ctx->reply_dh->ret_code; 3033 2968 if (retcode != NO_ERROR) 3034 2969 goto out; 3035 2970 3036 - device = adm_ctx.device; 2971 + device = adm_ctx->device; 3037 2972 if (!get_ldev(device)) { 3038 2973 retcode = ERR_NO_DISK; 3039 2974 goto out; 3040 2975 } 3041 2976 3042 - mutex_lock(&adm_ctx.resource->adm_mutex); 2977 + mutex_lock(&adm_ctx->resource->adm_mutex); 3043 2978 3044 2979 /* If there is still bitmap IO pending, probably because of a previous 3045 2980 * resync just being finished, wait for it before requesting a new resync. ··· 3062 2997 } else 3063 2998 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T)); 3064 2999 drbd_resume_io(device); 3065 - mutex_unlock(&adm_ctx.resource->adm_mutex); 3000 + mutex_unlock(&adm_ctx->resource->adm_mutex); 3066 3001 put_ldev(device); 3067 3002 out: 3068 - drbd_adm_finish(&adm_ctx, info, retcode); 3003 + adm_ctx->reply_dh->ret_code = retcode; 3069 3004 return 0; 3070 3005 } 3071 3006 3072 3007 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info, 3073 3008 union drbd_state mask, union drbd_state val) 3074 3009 { 3075 - struct drbd_config_context adm_ctx; 3010 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 3076 3011 enum drbd_ret_code retcode; 3077 3012 3078 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 3079 - if (!adm_ctx.reply_skb) 3080 - return retcode; 3013 + if (!adm_ctx->reply_skb) 3014 + return 0; 3015 + retcode = adm_ctx->reply_dh->ret_code; 3081 3016 if (retcode != NO_ERROR) 3082 3017 goto out; 3083 3018 3084 - mutex_lock(&adm_ctx.resource->adm_mutex); 3085 - retcode = drbd_request_state(adm_ctx.device, mask, val); 3086 - mutex_unlock(&adm_ctx.resource->adm_mutex); 3019 + 
mutex_lock(&adm_ctx->resource->adm_mutex); 3020 + retcode = drbd_request_state(adm_ctx->device, mask, val); 3021 + mutex_unlock(&adm_ctx->resource->adm_mutex); 3087 3022 out: 3088 - drbd_adm_finish(&adm_ctx, info, retcode); 3023 + adm_ctx->reply_dh->ret_code = retcode; 3089 3024 return 0; 3090 3025 } 3091 3026 ··· 3101 3036 3102 3037 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info) 3103 3038 { 3104 - struct drbd_config_context adm_ctx; 3039 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 3105 3040 int retcode; /* drbd_ret_code, drbd_state_rv */ 3106 3041 struct drbd_device *device; 3107 3042 3108 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 3109 - if (!adm_ctx.reply_skb) 3110 - return retcode; 3043 + if (!adm_ctx->reply_skb) 3044 + return 0; 3045 + retcode = adm_ctx->reply_dh->ret_code; 3111 3046 if (retcode != NO_ERROR) 3112 3047 goto out; 3113 3048 3114 - device = adm_ctx.device; 3049 + device = adm_ctx->device; 3115 3050 if (!get_ldev(device)) { 3116 3051 retcode = ERR_NO_DISK; 3117 3052 goto out; 3118 3053 } 3119 3054 3120 - mutex_lock(&adm_ctx.resource->adm_mutex); 3055 + mutex_lock(&adm_ctx->resource->adm_mutex); 3121 3056 3122 3057 /* If there is still bitmap IO pending, probably because of a previous 3123 3058 * resync just being finished, wait for it before requesting a new resync. 
··· 3143 3078 } else 3144 3079 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S)); 3145 3080 drbd_resume_io(device); 3146 - mutex_unlock(&adm_ctx.resource->adm_mutex); 3081 + mutex_unlock(&adm_ctx->resource->adm_mutex); 3147 3082 put_ldev(device); 3148 3083 out: 3149 - drbd_adm_finish(&adm_ctx, info, retcode); 3084 + adm_ctx->reply_dh->ret_code = retcode; 3150 3085 return 0; 3151 3086 } 3152 3087 3153 3088 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info) 3154 3089 { 3155 - struct drbd_config_context adm_ctx; 3090 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 3156 3091 enum drbd_ret_code retcode; 3157 3092 3158 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 3159 - if (!adm_ctx.reply_skb) 3160 - return retcode; 3093 + if (!adm_ctx->reply_skb) 3094 + return 0; 3095 + retcode = adm_ctx->reply_dh->ret_code; 3161 3096 if (retcode != NO_ERROR) 3162 3097 goto out; 3163 3098 3164 - mutex_lock(&adm_ctx.resource->adm_mutex); 3165 - if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO) 3099 + mutex_lock(&adm_ctx->resource->adm_mutex); 3100 + if (drbd_request_state(adm_ctx->device, NS(user_isp, 1)) == SS_NOTHING_TO_DO) 3166 3101 retcode = ERR_PAUSE_IS_SET; 3167 - mutex_unlock(&adm_ctx.resource->adm_mutex); 3102 + mutex_unlock(&adm_ctx->resource->adm_mutex); 3168 3103 out: 3169 - drbd_adm_finish(&adm_ctx, info, retcode); 3104 + adm_ctx->reply_dh->ret_code = retcode; 3170 3105 return 0; 3171 3106 } 3172 3107 3173 3108 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info) 3174 3109 { 3175 - struct drbd_config_context adm_ctx; 3110 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 3176 3111 union drbd_dev_state s; 3177 3112 enum drbd_ret_code retcode; 3178 3113 3179 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 3180 - if (!adm_ctx.reply_skb) 3181 - return retcode; 3114 + if (!adm_ctx->reply_skb) 3115 + return 0; 3116 + retcode = 
adm_ctx->reply_dh->ret_code; 3182 3117 if (retcode != NO_ERROR) 3183 3118 goto out; 3184 3119 3185 - mutex_lock(&adm_ctx.resource->adm_mutex); 3186 - if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) { 3187 - s = adm_ctx.device->state; 3120 + mutex_lock(&adm_ctx->resource->adm_mutex); 3121 + if (drbd_request_state(adm_ctx->device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) { 3122 + s = adm_ctx->device->state; 3188 3123 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) { 3189 3124 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP : 3190 3125 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR; ··· 3192 3127 retcode = ERR_PAUSE_IS_CLEAR; 3193 3128 } 3194 3129 } 3195 - mutex_unlock(&adm_ctx.resource->adm_mutex); 3130 + mutex_unlock(&adm_ctx->resource->adm_mutex); 3196 3131 out: 3197 - drbd_adm_finish(&adm_ctx, info, retcode); 3132 + adm_ctx->reply_dh->ret_code = retcode; 3198 3133 return 0; 3199 3134 } 3200 3135 ··· 3205 3140 3206 3141 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info) 3207 3142 { 3208 - struct drbd_config_context adm_ctx; 3143 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 3209 3144 struct drbd_device *device; 3210 3145 int retcode; /* enum drbd_ret_code rsp. 
enum drbd_state_rv */ 3211 3146 3212 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 3213 - if (!adm_ctx.reply_skb) 3214 - return retcode; 3147 + if (!adm_ctx->reply_skb) 3148 + return 0; 3149 + retcode = adm_ctx->reply_dh->ret_code; 3215 3150 if (retcode != NO_ERROR) 3216 3151 goto out; 3217 3152 3218 - mutex_lock(&adm_ctx.resource->adm_mutex); 3219 - device = adm_ctx.device; 3153 + mutex_lock(&adm_ctx->resource->adm_mutex); 3154 + device = adm_ctx->device; 3220 3155 if (test_bit(NEW_CUR_UUID, &device->flags)) { 3221 3156 if (get_ldev_if_state(device, D_ATTACHING)) { 3222 3157 drbd_uuid_new_current(device); ··· 3253 3188 tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO); 3254 3189 } 3255 3190 drbd_resume_io(device); 3256 - mutex_unlock(&adm_ctx.resource->adm_mutex); 3191 + mutex_unlock(&adm_ctx->resource->adm_mutex); 3257 3192 out: 3258 - drbd_adm_finish(&adm_ctx, info, retcode); 3193 + adm_ctx->reply_dh->ret_code = retcode; 3259 3194 return 0; 3260 3195 } 3261 3196 ··· 3908 3843 3909 3844 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info) 3910 3845 { 3911 - struct drbd_config_context adm_ctx; 3846 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 3912 3847 enum drbd_ret_code retcode; 3913 3848 int err; 3914 3849 3915 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 3916 - if (!adm_ctx.reply_skb) 3917 - return retcode; 3850 + if (!adm_ctx->reply_skb) 3851 + return 0; 3852 + retcode = adm_ctx->reply_dh->ret_code; 3918 3853 if (retcode != NO_ERROR) 3919 3854 goto out; 3920 3855 3921 - err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL); 3856 + err = nla_put_status_info(adm_ctx->reply_skb, adm_ctx->device, NULL); 3922 3857 if (err) { 3923 - nlmsg_free(adm_ctx.reply_skb); 3858 + nlmsg_free(adm_ctx->reply_skb); 3859 + adm_ctx->reply_skb = NULL; 3924 3860 return err; 3925 3861 } 3926 3862 out: 3927 - drbd_adm_finish(&adm_ctx, info, retcode); 3863 + 
adm_ctx->reply_dh->ret_code = retcode; 3928 3864 return 0; 3929 3865 } 3930 3866 ··· 4112 4046 4113 4047 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info) 4114 4048 { 4115 - struct drbd_config_context adm_ctx; 4049 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 4116 4050 enum drbd_ret_code retcode; 4117 4051 struct timeout_parms tp; 4118 4052 int err; 4119 4053 4120 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 4121 - if (!adm_ctx.reply_skb) 4122 - return retcode; 4054 + if (!adm_ctx->reply_skb) 4055 + return 0; 4056 + retcode = adm_ctx->reply_dh->ret_code; 4123 4057 if (retcode != NO_ERROR) 4124 4058 goto out; 4125 4059 4126 4060 tp.timeout_type = 4127 - adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED : 4128 - test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED : 4061 + adm_ctx->device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED : 4062 + test_bit(USE_DEGR_WFC_T, &adm_ctx->device->flags) ? UT_DEGRADED : 4129 4063 UT_DEFAULT; 4130 4064 4131 - err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp); 4065 + err = timeout_parms_to_priv_skb(adm_ctx->reply_skb, &tp); 4132 4066 if (err) { 4133 - nlmsg_free(adm_ctx.reply_skb); 4067 + nlmsg_free(adm_ctx->reply_skb); 4068 + adm_ctx->reply_skb = NULL; 4134 4069 return err; 4135 4070 } 4136 4071 out: 4137 - drbd_adm_finish(&adm_ctx, info, retcode); 4072 + adm_ctx->reply_dh->ret_code = retcode; 4138 4073 return 0; 4139 4074 } 4140 4075 4141 4076 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info) 4142 4077 { 4143 - struct drbd_config_context adm_ctx; 4078 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 4144 4079 struct drbd_device *device; 4145 4080 enum drbd_ret_code retcode; 4146 4081 struct start_ov_parms parms; 4147 4082 4148 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 4149 - if (!adm_ctx.reply_skb) 4150 - return retcode; 4083 + if (!adm_ctx->reply_skb) 4084 + return 0; 4085 + retcode 
= adm_ctx->reply_dh->ret_code; 4151 4086 if (retcode != NO_ERROR) 4152 4087 goto out; 4153 4088 4154 - device = adm_ctx.device; 4089 + device = adm_ctx->device; 4155 4090 4156 4091 /* resume from last known position, if possible */ 4157 4092 parms.ov_start_sector = device->ov_start_sector; ··· 4161 4094 int err = start_ov_parms_from_attrs(&parms, info); 4162 4095 if (err) { 4163 4096 retcode = ERR_MANDATORY_TAG; 4164 - drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 4097 + drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err)); 4165 4098 goto out; 4166 4099 } 4167 4100 } 4168 - mutex_lock(&adm_ctx.resource->adm_mutex); 4101 + mutex_lock(&adm_ctx->resource->adm_mutex); 4169 4102 4170 4103 /* w_make_ov_request expects position to be aligned */ 4171 4104 device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1); ··· 4178 4111 retcode = drbd_request_state(device, NS(conn, C_VERIFY_S)); 4179 4112 drbd_resume_io(device); 4180 4113 4181 - mutex_unlock(&adm_ctx.resource->adm_mutex); 4114 + mutex_unlock(&adm_ctx->resource->adm_mutex); 4182 4115 out: 4183 - drbd_adm_finish(&adm_ctx, info, retcode); 4116 + adm_ctx->reply_dh->ret_code = retcode; 4184 4117 return 0; 4185 4118 } 4186 4119 4187 4120 4188 4121 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info) 4189 4122 { 4190 - struct drbd_config_context adm_ctx; 4123 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 4191 4124 struct drbd_device *device; 4192 4125 enum drbd_ret_code retcode; 4193 4126 int skip_initial_sync = 0; 4194 4127 int err; 4195 4128 struct new_c_uuid_parms args; 4196 4129 4197 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 4198 - if (!adm_ctx.reply_skb) 4199 - return retcode; 4130 + if (!adm_ctx->reply_skb) 4131 + return 0; 4132 + retcode = adm_ctx->reply_dh->ret_code; 4200 4133 if (retcode != NO_ERROR) 4201 4134 goto out_nolock; 4202 4135 4203 - device = adm_ctx.device; 4136 + device = adm_ctx->device; 4204 4137 
memset(&args, 0, sizeof(args)); 4205 4138 if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) { 4206 4139 err = new_c_uuid_parms_from_attrs(&args, info); 4207 4140 if (err) { 4208 4141 retcode = ERR_MANDATORY_TAG; 4209 - drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 4142 + drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err)); 4210 4143 goto out_nolock; 4211 4144 } 4212 4145 } 4213 4146 4214 - mutex_lock(&adm_ctx.resource->adm_mutex); 4147 + mutex_lock(&adm_ctx->resource->adm_mutex); 4215 4148 mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */ 4216 4149 4217 4150 if (!get_ldev(device)) { ··· 4256 4189 put_ldev(device); 4257 4190 out: 4258 4191 mutex_unlock(device->state_mutex); 4259 - mutex_unlock(&adm_ctx.resource->adm_mutex); 4192 + mutex_unlock(&adm_ctx->resource->adm_mutex); 4260 4193 out_nolock: 4261 - drbd_adm_finish(&adm_ctx, info, retcode); 4194 + adm_ctx->reply_dh->ret_code = retcode; 4262 4195 return 0; 4263 4196 } 4264 4197 ··· 4291 4224 int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info) 4292 4225 { 4293 4226 struct drbd_connection *connection; 4294 - struct drbd_config_context adm_ctx; 4227 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 4295 4228 enum drbd_ret_code retcode; 4296 4229 struct res_opts res_opts; 4297 4230 int err; 4298 4231 4299 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0); 4300 - if (!adm_ctx.reply_skb) 4301 - return retcode; 4232 + if (!adm_ctx->reply_skb) 4233 + return 0; 4234 + retcode = adm_ctx->reply_dh->ret_code; 4302 4235 if (retcode != NO_ERROR) 4303 4236 goto out; 4304 4237 ··· 4306 4239 err = res_opts_from_attrs(&res_opts, info); 4307 4240 if (err && err != -ENOMSG) { 4308 4241 retcode = ERR_MANDATORY_TAG; 4309 - drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); 4242 + drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err)); 4310 4243 goto out; 4311 4244 } 4312 4245 4313 - retcode = 
drbd_check_resource_name(&adm_ctx); 4246 + retcode = drbd_check_resource_name(adm_ctx); 4314 4247 if (retcode != NO_ERROR) 4315 4248 goto out; 4316 4249 4317 - if (adm_ctx.resource) { 4250 + if (adm_ctx->resource) { 4318 4251 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) { 4319 4252 retcode = ERR_INVALID_REQUEST; 4320 - drbd_msg_put_info(adm_ctx.reply_skb, "resource exists"); 4253 + drbd_msg_put_info(adm_ctx->reply_skb, "resource exists"); 4321 4254 } 4322 4255 /* else: still NO_ERROR */ 4323 4256 goto out; ··· 4325 4258 4326 4259 /* not yet safe for genl_family.parallel_ops */ 4327 4260 mutex_lock(&resources_mutex); 4328 - connection = conn_create(adm_ctx.resource_name, &res_opts); 4261 + connection = conn_create(adm_ctx->resource_name, &res_opts); 4329 4262 mutex_unlock(&resources_mutex); 4330 4263 4331 4264 if (connection) { ··· 4340 4273 retcode = ERR_NOMEM; 4341 4274 4342 4275 out: 4343 - drbd_adm_finish(&adm_ctx, info, retcode); 4276 + adm_ctx->reply_dh->ret_code = retcode; 4344 4277 return 0; 4345 4278 } 4346 4279 ··· 4353 4286 4354 4287 int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info) 4355 4288 { 4356 - struct drbd_config_context adm_ctx; 4289 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 4357 4290 struct drbd_genlmsghdr *dh = genl_info_userhdr(info); 4358 4291 enum drbd_ret_code retcode; 4359 4292 4360 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); 4361 - if (!adm_ctx.reply_skb) 4362 - return retcode; 4293 + if (!adm_ctx->reply_skb) 4294 + return 0; 4295 + retcode = adm_ctx->reply_dh->ret_code; 4363 4296 if (retcode != NO_ERROR) 4364 4297 goto out; 4365 4298 4366 4299 if (dh->minor > MINORMASK) { 4367 - drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range"); 4300 + drbd_msg_put_info(adm_ctx->reply_skb, "requested minor out of range"); 4368 4301 retcode = ERR_INVALID_REQUEST; 4369 4302 goto out; 4370 4303 } 4371 - if (adm_ctx.volume > DRBD_VOLUME_MAX) { 4372 - 
drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range"); 4304 + if (adm_ctx->volume > DRBD_VOLUME_MAX) { 4305 + drbd_msg_put_info(adm_ctx->reply_skb, "requested volume id out of range"); 4373 4306 retcode = ERR_INVALID_REQUEST; 4374 4307 goto out; 4375 4308 } 4376 4309 4377 4310 /* drbd_adm_prepare made sure already 4378 4311 * that first_peer_device(device)->connection and device->vnr match the request. */ 4379 - if (adm_ctx.device) { 4312 + if (adm_ctx->device) { 4380 4313 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) 4381 4314 retcode = ERR_MINOR_OR_VOLUME_EXISTS; 4382 4315 /* else: still NO_ERROR */ 4383 4316 goto out; 4384 4317 } 4385 4318 4386 - mutex_lock(&adm_ctx.resource->adm_mutex); 4387 - retcode = drbd_create_device(&adm_ctx, dh->minor); 4319 + mutex_lock(&adm_ctx->resource->adm_mutex); 4320 + retcode = drbd_create_device(adm_ctx, dh->minor); 4388 4321 if (retcode == NO_ERROR) { 4389 4322 struct drbd_device *device; 4390 4323 struct drbd_peer_device *peer_device; ··· 4415 4348 } 4416 4349 mutex_unlock(&notification_mutex); 4417 4350 } 4418 - mutex_unlock(&adm_ctx.resource->adm_mutex); 4351 + mutex_unlock(&adm_ctx->resource->adm_mutex); 4419 4352 out: 4420 - drbd_adm_finish(&adm_ctx, info, retcode); 4353 + adm_ctx->reply_dh->ret_code = retcode; 4421 4354 return 0; 4422 4355 } 4423 4356 ··· 4460 4393 4461 4394 int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info) 4462 4395 { 4463 - struct drbd_config_context adm_ctx; 4396 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 4464 4397 enum drbd_ret_code retcode; 4465 4398 4466 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); 4467 - if (!adm_ctx.reply_skb) 4468 - return retcode; 4399 + if (!adm_ctx->reply_skb) 4400 + return 0; 4401 + retcode = adm_ctx->reply_dh->ret_code; 4469 4402 if (retcode != NO_ERROR) 4470 4403 goto out; 4471 4404 4472 - mutex_lock(&adm_ctx.resource->adm_mutex); 4473 - retcode = adm_del_minor(adm_ctx.device); 4474 - 
mutex_unlock(&adm_ctx.resource->adm_mutex); 4405 + mutex_lock(&adm_ctx->resource->adm_mutex); 4406 + retcode = adm_del_minor(adm_ctx->device); 4407 + mutex_unlock(&adm_ctx->resource->adm_mutex); 4475 4408 out: 4476 - drbd_adm_finish(&adm_ctx, info, retcode); 4409 + adm_ctx->reply_dh->ret_code = retcode; 4477 4410 return 0; 4478 4411 } 4479 4412 ··· 4509 4442 4510 4443 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) 4511 4444 { 4512 - struct drbd_config_context adm_ctx; 4445 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 4513 4446 struct drbd_resource *resource; 4514 4447 struct drbd_connection *connection; 4515 4448 struct drbd_device *device; 4516 4449 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */ 4517 4450 unsigned i; 4518 4451 4519 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); 4520 - if (!adm_ctx.reply_skb) 4521 - return retcode; 4452 + if (!adm_ctx->reply_skb) 4453 + return 0; 4454 + retcode = adm_ctx->reply_dh->ret_code; 4522 4455 if (retcode != NO_ERROR) 4523 4456 goto finish; 4524 4457 4525 - resource = adm_ctx.resource; 4458 + resource = adm_ctx->resource; 4526 4459 mutex_lock(&resource->adm_mutex); 4527 4460 /* demote */ 4528 4461 for_each_connection(connection, resource) { ··· 4531 4464 idr_for_each_entry(&connection->peer_devices, peer_device, i) { 4532 4465 retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0); 4533 4466 if (retcode < SS_SUCCESS) { 4534 - drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote"); 4467 + drbd_msg_put_info(adm_ctx->reply_skb, "failed to demote"); 4535 4468 goto out; 4536 4469 } 4537 4470 } 4538 4471 4539 4472 retcode = conn_try_disconnect(connection, 0); 4540 4473 if (retcode < SS_SUCCESS) { 4541 - drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect"); 4474 + drbd_msg_put_info(adm_ctx->reply_skb, "failed to disconnect"); 4542 4475 goto out; 4543 4476 } 4544 4477 } ··· 4547 4480 idr_for_each_entry(&resource->devices, device, i) { 4548 4481 
retcode = adm_detach(device, 0); 4549 4482 if (retcode < SS_SUCCESS || retcode > NO_ERROR) { 4550 - drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach"); 4483 + drbd_msg_put_info(adm_ctx->reply_skb, "failed to detach"); 4551 4484 goto out; 4552 4485 } 4553 4486 } ··· 4557 4490 retcode = adm_del_minor(device); 4558 4491 if (retcode != NO_ERROR) { 4559 4492 /* "can not happen" */ 4560 - drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume"); 4493 + drbd_msg_put_info(adm_ctx->reply_skb, "failed to delete volume"); 4561 4494 goto out; 4562 4495 } 4563 4496 } ··· 4566 4499 out: 4567 4500 mutex_unlock(&resource->adm_mutex); 4568 4501 finish: 4569 - drbd_adm_finish(&adm_ctx, info, retcode); 4502 + adm_ctx->reply_dh->ret_code = retcode; 4570 4503 return 0; 4571 4504 } 4572 4505 4573 4506 int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info) 4574 4507 { 4575 - struct drbd_config_context adm_ctx; 4508 + struct drbd_config_context *adm_ctx = info->user_ptr[0]; 4576 4509 struct drbd_resource *resource; 4577 4510 enum drbd_ret_code retcode; 4578 4511 4579 - retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); 4580 - if (!adm_ctx.reply_skb) 4581 - return retcode; 4512 + if (!adm_ctx->reply_skb) 4513 + return 0; 4514 + retcode = adm_ctx->reply_dh->ret_code; 4582 4515 if (retcode != NO_ERROR) 4583 4516 goto finish; 4584 - resource = adm_ctx.resource; 4517 + resource = adm_ctx->resource; 4585 4518 4586 4519 mutex_lock(&resource->adm_mutex); 4587 4520 retcode = adm_del_resource(resource); 4588 4521 mutex_unlock(&resource->adm_mutex); 4589 4522 finish: 4590 - drbd_adm_finish(&adm_ctx, info, retcode); 4523 + adm_ctx->reply_dh->ret_code = retcode; 4591 4524 return 0; 4592 4525 } 4593 4526
+4
include/linux/genl_magic_func.h
··· 292 292 #endif 293 293 .maxattr = ARRAY_SIZE(CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy))-1, 294 294 .policy = CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy), 295 + #ifdef GENL_MAGIC_FAMILY_PRE_DOIT 296 + .pre_doit = GENL_MAGIC_FAMILY_PRE_DOIT, 297 + .post_doit = GENL_MAGIC_FAMILY_POST_DOIT, 298 + #endif 295 299 .ops = ZZZ_genl_ops, 296 300 .n_ops = ARRAY_SIZE(ZZZ_genl_ops), 297 301 .mcgrps = ZZZ_genl_mcgrps,