Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag '6.17-rc6-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6

Pull smb client fixes from Steve French:

- Two unlink fixes: one for rename and one for deferred close

- Four smbdirect/RDMA fixes: a fix for a buffer leak in negotiate, two
  fixes for races in smbd_destroy, and a fix for the offset and length
  checks in recv_done

* tag '6.17-rc6-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6:
smb: client: fix smbdirect_recv_io leak in smbd_negotiate() error path
smb: client: fix file open check in __cifs_unlink()
smb: client: let smbd_destroy() call disable_work_sync(&info->post_send_credits_work)
smb: client: use disable[_delayed]_work_sync in smbdirect.c
smb: client: fix filename matching of deferred files
smb: client: let recv_done verify data_offset, data_length and remaining_data_length

+63 -33
+2 -2
fs/smb/client/cifsproto.h
··· 312 312 313 313 extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon); 314 314 315 - extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon, 316 - const char *path); 315 + void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon, 316 + struct dentry *dentry); 317 317 318 318 extern void cifs_mark_open_handles_for_deleted_file(struct inode *inode, 319 319 const char *path);
+18 -5
fs/smb/client/inode.c
··· 1984 1984 } 1985 1985 1986 1986 netfs_wait_for_outstanding_io(inode); 1987 - cifs_close_deferred_file_under_dentry(tcon, full_path); 1987 + cifs_close_deferred_file_under_dentry(tcon, dentry); 1988 1988 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1989 1989 if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & 1990 1990 le64_to_cpu(tcon->fsUnixInfo.Capability))) { ··· 2003 2003 goto psx_del_no_retry; 2004 2004 } 2005 2005 2006 - if (sillyrename || (server->vals->protocol_id > SMB10_PROT_ID && 2007 - d_is_positive(dentry) && d_count(dentry) > 2)) 2006 + /* For SMB2+, if the file is open, we always perform a silly rename. 2007 + * 2008 + * We check for d_count() right after calling 2009 + * cifs_close_deferred_file_under_dentry() to make sure that the 2010 + * dentry's refcount gets dropped in case the file had any deferred 2011 + * close. 2012 + */ 2013 + if (!sillyrename && server->vals->protocol_id > SMB10_PROT_ID) { 2014 + spin_lock(&dentry->d_lock); 2015 + if (d_count(dentry) > 1) 2016 + sillyrename = true; 2017 + spin_unlock(&dentry->d_lock); 2018 + } 2019 + 2020 + if (sillyrename) 2008 2021 rc = -EBUSY; 2009 2022 else 2010 2023 rc = server->ops->unlink(xid, tcon, full_path, cifs_sb, dentry); ··· 2551 2538 goto cifs_rename_exit; 2552 2539 } 2553 2540 2554 - cifs_close_deferred_file_under_dentry(tcon, from_name); 2541 + cifs_close_deferred_file_under_dentry(tcon, source_dentry); 2555 2542 if (d_inode(target_dentry) != NULL) { 2556 2543 netfs_wait_for_outstanding_io(d_inode(target_dentry)); 2557 - cifs_close_deferred_file_under_dentry(tcon, to_name); 2544 + cifs_close_deferred_file_under_dentry(tcon, target_dentry); 2558 2545 } 2559 2546 2560 2547 rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
+15 -21
fs/smb/client/misc.c
··· 832 832 kfree(tmp_list); 833 833 } 834 834 } 835 - void 836 - cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path) 835 + 836 + void cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, 837 + struct dentry *dentry) 837 838 { 838 - struct cifsFileInfo *cfile; 839 839 struct file_list *tmp_list, *tmp_next_list; 840 - void *page; 841 - const char *full_path; 840 + struct cifsFileInfo *cfile; 842 841 LIST_HEAD(file_head); 843 842 844 - page = alloc_dentry_path(); 845 843 spin_lock(&tcon->open_file_lock); 846 844 list_for_each_entry(cfile, &tcon->openFileList, tlist) { 847 - full_path = build_path_from_dentry(cfile->dentry, page); 848 - if (strstr(full_path, path)) { 849 - if (delayed_work_pending(&cfile->deferred)) { 850 - if (cancel_delayed_work(&cfile->deferred)) { 851 - spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); 852 - cifs_del_deferred_close(cfile); 853 - spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); 845 + if ((cfile->dentry == dentry) && 846 + delayed_work_pending(&cfile->deferred) && 847 + cancel_delayed_work(&cfile->deferred)) { 848 + spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); 849 + cifs_del_deferred_close(cfile); 850 + spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); 854 851 855 - tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); 856 - if (tmp_list == NULL) 857 - break; 858 - tmp_list->cfile = cfile; 859 - list_add_tail(&tmp_list->list, &file_head); 860 - } 861 - } 852 + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); 853 + if (tmp_list == NULL) 854 + break; 855 + tmp_list->cfile = cfile; 856 + list_add_tail(&tmp_list->list, &file_head); 862 857 } 863 858 } 864 859 spin_unlock(&tcon->open_file_lock); ··· 863 868 list_del(&tmp_list->list); 864 869 kfree(tmp_list); 865 870 } 866 - free_dentry_path(page); 867 871 } 868 872 869 873 /*
+28 -5
fs/smb/client/smbdirect.c
··· 453 453 struct smbdirect_recv_io *response = 454 454 container_of(wc->wr_cqe, struct smbdirect_recv_io, cqe); 455 455 struct smbdirect_socket *sc = response->socket; 456 + struct smbdirect_socket_parameters *sp = &sc->parameters; 456 457 struct smbd_connection *info = 457 458 container_of(sc, struct smbd_connection, socket); 458 - int data_length = 0; 459 + u32 data_offset = 0; 460 + u32 data_length = 0; 461 + u32 remaining_data_length = 0; 459 462 460 463 log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n", 461 464 response, sc->recv_io.expected, wc->status, wc->opcode, ··· 490 487 /* SMBD data transfer packet */ 491 488 case SMBDIRECT_EXPECT_DATA_TRANSFER: 492 489 data_transfer = smbdirect_recv_io_payload(response); 490 + 491 + if (wc->byte_len < 492 + offsetof(struct smbdirect_data_transfer, padding)) 493 + goto error; 494 + 495 + remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length); 496 + data_offset = le32_to_cpu(data_transfer->data_offset); 493 497 data_length = le32_to_cpu(data_transfer->data_length); 498 + if (wc->byte_len < data_offset || 499 + (u64)wc->byte_len < (u64)data_offset + data_length) 500 + goto error; 501 + 502 + if (remaining_data_length > sp->max_fragmented_recv_size || 503 + data_length > sp->max_fragmented_recv_size || 504 + (u64)remaining_data_length + (u64)data_length > (u64)sp->max_fragmented_recv_size) 505 + goto error; 494 506 495 507 if (data_length) { 496 508 if (sc->recv_io.reassembly.full_packet_received) ··· 1108 1090 log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n", 1109 1091 rc, response->sge.addr, 1110 1092 response->sge.length, response->sge.lkey); 1111 - if (rc) 1093 + if (rc) { 1094 + put_receive_buffer(info, response); 1112 1095 return rc; 1096 + } 1113 1097 1114 1098 init_completion(&info->negotiate_completion); 1115 1099 info->negotiate_done = false; ··· 1349 1329 sc->status == SMBDIRECT_SOCKET_DISCONNECTED); 
1350 1330 } 1351 1331 1332 + log_rdma_event(INFO, "cancelling post_send_credits_work\n"); 1333 + disable_work_sync(&info->post_send_credits_work); 1334 + 1352 1335 log_rdma_event(INFO, "destroying qp\n"); 1353 1336 ib_drain_qp(sc->ib.qp); 1354 1337 rdma_destroy_qp(sc->rdma.cm_id); 1355 1338 sc->ib.qp = NULL; 1356 1339 1357 1340 log_rdma_event(INFO, "cancelling idle timer\n"); 1358 - cancel_delayed_work_sync(&info->idle_timer_work); 1341 + disable_delayed_work_sync(&info->idle_timer_work); 1359 1342 1360 1343 /* It's not possible for upper layer to get to reassembly */ 1361 1344 log_rdma_event(INFO, "drain the reassembly queue\n"); ··· 1731 1708 return NULL; 1732 1709 1733 1710 negotiation_failed: 1734 - cancel_delayed_work_sync(&info->idle_timer_work); 1711 + disable_delayed_work_sync(&info->idle_timer_work); 1735 1712 destroy_caches_and_workqueue(info); 1736 1713 sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED; 1737 1714 rdma_disconnect(sc->rdma.cm_id); ··· 2090 2067 struct smbdirect_socket *sc = &info->socket; 2091 2068 struct smbd_mr *mr, *tmp; 2092 2069 2093 - cancel_work_sync(&info->mr_recovery_work); 2070 + disable_work_sync(&info->mr_recovery_work); 2094 2071 list_for_each_entry_safe(mr, tmp, &info->mr_list, list) { 2095 2072 if (mr->state == MR_INVALIDATED) 2096 2073 ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl,