Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'char-misc-4.14-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver fixes from Greg KH:
"Here are 4 patches to resolve some char/misc driver issues found these
past weeks.

One of them is a mei bugfix and another is a new mei device id. There
is also a hyper-v fix for a reported issue, and a binder issue fix for
a problem reported by a few people.

All of these have been in my tree for a while, I don't know if
linux-next is really testing much this month. But 0-day is happy with
them :)"

* tag 'char-misc-4.14-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
binder: fix use-after-free in binder_transaction()
Drivers: hv: vmbus: Fix bugs in rescind handling
mei: me: add gemini lake devices id
mei: always use domain runtime pm callbacks.

+116 -82
+67 -28
drivers/android/binder.c
··· 2582 2582 return true; 2583 2583 } 2584 2584 2585 + /** 2586 + * binder_get_node_refs_for_txn() - Get required refs on node for txn 2587 + * @node: struct binder_node for which to get refs 2588 + * @proc: returns @node->proc if valid 2589 + * @error: if no @proc then returns BR_DEAD_REPLY 2590 + * 2591 + * User-space normally keeps the node alive when creating a transaction 2592 + * since it has a reference to the target. The local strong ref keeps it 2593 + * alive if the sending process dies before the target process processes 2594 + * the transaction. If the source process is malicious or has a reference 2595 + * counting bug, relying on the local strong ref can fail. 2596 + * 2597 + * Since user-space can cause the local strong ref to go away, we also take 2598 + * a tmpref on the node to ensure it survives while we are constructing 2599 + * the transaction. We also need a tmpref on the proc while we are 2600 + * constructing the transaction, so we take that here as well. 2601 + * 2602 + * Return: The target_node with refs taken or NULL if @node->proc is NULL. 2603 + * Also sets @proc if valid. 
If the @node->proc is NULL indicating that the 2604 + * target proc has died, @error is set to BR_DEAD_REPLY 2605 + */ 2606 + static struct binder_node *binder_get_node_refs_for_txn( 2607 + struct binder_node *node, 2608 + struct binder_proc **procp, 2609 + uint32_t *error) 2610 + { 2611 + struct binder_node *target_node = NULL; 2612 + 2613 + binder_node_inner_lock(node); 2614 + if (node->proc) { 2615 + target_node = node; 2616 + binder_inc_node_nilocked(node, 1, 0, NULL); 2617 + binder_inc_node_tmpref_ilocked(node); 2618 + node->proc->tmp_ref++; 2619 + *procp = node->proc; 2620 + } else 2621 + *error = BR_DEAD_REPLY; 2622 + binder_node_inner_unlock(node); 2623 + 2624 + return target_node; 2625 + } 2626 + 2585 2627 static void binder_transaction(struct binder_proc *proc, 2586 2628 struct binder_thread *thread, 2587 2629 struct binder_transaction_data *tr, int reply, ··· 2727 2685 ref = binder_get_ref_olocked(proc, tr->target.handle, 2728 2686 true); 2729 2687 if (ref) { 2730 - binder_inc_node(ref->node, 1, 0, NULL); 2731 - target_node = ref->node; 2688 + target_node = binder_get_node_refs_for_txn( 2689 + ref->node, &target_proc, 2690 + &return_error); 2691 + } else { 2692 + binder_user_error("%d:%d got transaction to invalid handle\n", 2693 + proc->pid, thread->pid); 2694 + return_error = BR_FAILED_REPLY; 2732 2695 } 2733 2696 binder_proc_unlock(proc); 2734 - if (target_node == NULL) { 2735 - binder_user_error("%d:%d got transaction to invalid handle\n", 2736 - proc->pid, thread->pid); 2737 - return_error = BR_FAILED_REPLY; 2738 - return_error_param = -EINVAL; 2739 - return_error_line = __LINE__; 2740 - goto err_invalid_target_handle; 2741 - } 2742 2697 } else { 2743 2698 mutex_lock(&context->context_mgr_node_lock); 2744 2699 target_node = context->binder_context_mgr_node; 2745 - if (target_node == NULL) { 2700 + if (target_node) 2701 + target_node = binder_get_node_refs_for_txn( 2702 + target_node, &target_proc, 2703 + &return_error); 2704 + else 2746 2705 
return_error = BR_DEAD_REPLY; 2747 - mutex_unlock(&context->context_mgr_node_lock); 2748 - return_error_line = __LINE__; 2749 - goto err_no_context_mgr_node; 2750 - } 2751 - binder_inc_node(target_node, 1, 0, NULL); 2752 2706 mutex_unlock(&context->context_mgr_node_lock); 2753 2707 } 2754 - e->to_node = target_node->debug_id; 2755 - binder_node_lock(target_node); 2756 - target_proc = target_node->proc; 2757 - if (target_proc == NULL) { 2758 - binder_node_unlock(target_node); 2759 - return_error = BR_DEAD_REPLY; 2708 + if (!target_node) { 2709 + /* 2710 + * return_error is set above 2711 + */ 2712 + return_error_param = -EINVAL; 2760 2713 return_error_line = __LINE__; 2761 2714 goto err_dead_binder; 2762 2715 } 2763 - binder_inner_proc_lock(target_proc); 2764 - target_proc->tmp_ref++; 2765 - binder_inner_proc_unlock(target_proc); 2766 - binder_node_unlock(target_node); 2716 + e->to_node = target_node->debug_id; 2767 2717 if (security_binder_transaction(proc->tsk, 2768 2718 target_proc->tsk) < 0) { 2769 2719 return_error = BR_FAILED_REPLY; ··· 3105 3071 if (target_thread) 3106 3072 binder_thread_dec_tmpref(target_thread); 3107 3073 binder_proc_dec_tmpref(target_proc); 3074 + if (target_node) 3075 + binder_dec_node_tmpref(target_node); 3108 3076 /* 3109 3077 * write barrier to synchronize with initialization 3110 3078 * of log entry ··· 3126 3090 err_copy_data_failed: 3127 3091 trace_binder_transaction_failed_buffer_release(t->buffer); 3128 3092 binder_transaction_buffer_release(target_proc, t->buffer, offp); 3093 + if (target_node) 3094 + binder_dec_node_tmpref(target_node); 3129 3095 target_node = NULL; 3130 3096 t->buffer->transaction = NULL; 3131 3097 binder_alloc_free_buf(&target_proc->alloc, t->buffer); ··· 3142 3104 err_empty_call_stack: 3143 3105 err_dead_binder: 3144 3106 err_invalid_target_handle: 3145 - err_no_context_mgr_node: 3146 3107 if (target_thread) 3147 3108 binder_thread_dec_tmpref(target_thread); 3148 3109 if (target_proc) 3149 3110 
binder_proc_dec_tmpref(target_proc); 3150 - if (target_node) 3111 + if (target_node) { 3151 3112 binder_dec_node(target_node, 1, 0); 3113 + binder_dec_node_tmpref(target_node); 3114 + } 3152 3115 3153 3116 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 3154 3117 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
+3 -3
drivers/hv/channel.c
··· 640 640 */ 641 641 return; 642 642 } 643 + mutex_lock(&vmbus_connection.channel_mutex); 643 644 /* 644 645 * Close all the sub-channels first and then close the 645 646 * primary channel. ··· 649 648 cur_channel = list_entry(cur, struct vmbus_channel, sc_list); 650 649 vmbus_close_internal(cur_channel); 651 650 if (cur_channel->rescind) { 652 - mutex_lock(&vmbus_connection.channel_mutex); 653 - hv_process_channel_removal(cur_channel, 651 + hv_process_channel_removal( 654 652 cur_channel->offermsg.child_relid); 655 - mutex_unlock(&vmbus_connection.channel_mutex); 656 653 } 657 654 } 658 655 /* 659 656 * Now close the primary. 660 657 */ 661 658 vmbus_close_internal(channel); 659 + mutex_unlock(&vmbus_connection.channel_mutex); 662 660 } 663 661 EXPORT_SYMBOL_GPL(vmbus_close); 664 662
+18 -19
drivers/hv/channel_mgmt.c
··· 159 159 160 160 161 161 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 162 - 162 + channel->rescind = true; 163 163 list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, 164 164 msglistentry) { 165 165 ··· 381 381 true); 382 382 } 383 383 384 - void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) 384 + void hv_process_channel_removal(u32 relid) 385 385 { 386 386 unsigned long flags; 387 - struct vmbus_channel *primary_channel; 387 + struct vmbus_channel *primary_channel, *channel; 388 388 389 - BUG_ON(!channel->rescind); 390 389 BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); 391 390 391 + /* 392 + * Make sure channel is valid as we may have raced. 393 + */ 394 + channel = relid2channel(relid); 395 + if (!channel) 396 + return; 397 + 398 + BUG_ON(!channel->rescind); 392 399 if (channel->target_cpu != get_cpu()) { 393 400 put_cpu(); 394 401 smp_call_function_single(channel->target_cpu, ··· 522 515 if (!fnew) { 523 516 if (channel->sc_creation_callback != NULL) 524 517 channel->sc_creation_callback(newchannel); 518 + newchannel->probe_done = true; 525 519 return; 526 520 } 527 521 ··· 842 834 { 843 835 struct vmbus_channel_rescind_offer *rescind; 844 836 struct vmbus_channel *channel; 845 - unsigned long flags; 846 837 struct device *dev; 847 838 848 839 rescind = (struct vmbus_channel_rescind_offer *)hdr; ··· 880 873 return; 881 874 } 882 875 883 - spin_lock_irqsave(&channel->lock, flags); 884 - channel->rescind = true; 885 - spin_unlock_irqrestore(&channel->lock, flags); 886 - 887 - /* 888 - * Now that we have posted the rescind state, perform 889 - * rescind related cleanup. 890 - */ 891 - vmbus_rescind_cleanup(channel); 892 - 893 876 /* 894 877 * Now wait for offer handling to complete. 
895 878 */ ··· 898 901 if (channel->device_obj) { 899 902 if (channel->chn_rescind_callback) { 900 903 channel->chn_rescind_callback(channel); 904 + vmbus_rescind_cleanup(channel); 901 905 return; 902 906 } 903 907 /* ··· 907 909 */ 908 910 dev = get_device(&channel->device_obj->device); 909 911 if (dev) { 912 + vmbus_rescind_cleanup(channel); 910 913 vmbus_device_unregister(channel->device_obj); 911 914 put_device(dev); 912 915 } ··· 920 921 * 1. Close all sub-channels first 921 922 * 2. Then close the primary channel. 922 923 */ 924 + mutex_lock(&vmbus_connection.channel_mutex); 925 + vmbus_rescind_cleanup(channel); 923 926 if (channel->state == CHANNEL_OPEN_STATE) { 924 927 /* 925 928 * The channel is currently not open; 926 929 * it is safe for us to cleanup the channel. 927 930 */ 928 - mutex_lock(&vmbus_connection.channel_mutex); 929 - hv_process_channel_removal(channel, 930 - channel->offermsg.child_relid); 931 - mutex_unlock(&vmbus_connection.channel_mutex); 931 + hv_process_channel_removal(rescind->child_relid); 932 932 } 933 + mutex_unlock(&vmbus_connection.channel_mutex); 933 934 } 934 935 } 935 936
+1 -2
drivers/hv/vmbus_drv.c
··· 768 768 struct vmbus_channel *channel = hv_dev->channel; 769 769 770 770 mutex_lock(&vmbus_connection.channel_mutex); 771 - hv_process_channel_removal(channel, 772 - channel->offermsg.child_relid); 771 + hv_process_channel_removal(channel->offermsg.child_relid); 773 772 mutex_unlock(&vmbus_connection.channel_mutex); 774 773 kfree(hv_dev); 775 774
+2
drivers/misc/mei/hw-me-regs.h
··· 127 127 #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ 128 128 #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ 129 129 130 + #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ 131 + 130 132 #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ 131 133 #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ 132 134
+13 -10
drivers/misc/mei/pci-me.c
··· 93 93 {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, 94 94 {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, 95 95 96 + {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, 97 + 96 98 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, 97 99 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, 98 100 ··· 228 226 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; 229 227 230 228 /* 231 - * For not wake-able HW runtime pm framework 232 - * can't be used on pci device level. 233 - * Use domain runtime pm callbacks instead. 234 - */ 235 - if (!pci_dev_run_wake(pdev)) 236 - mei_me_set_pm_domain(dev); 229 + * ME maps runtime suspend/resume to D0i states, 230 + * hence we need to go around native PCI runtime service which 231 + * eventually brings the device into D3cold/hot state, 232 + * but the mei device cannot wake up from D3 unlike from D0i3. 233 + * To get around the PCI device native runtime pm, 234 + * ME uses runtime pm domain handlers which take precedence 235 + * over the driver's pm handlers. 236 + */ 237 + mei_me_set_pm_domain(dev); 237 238 238 239 if (mei_pg_is_enabled(dev)) 239 240 pm_runtime_put_noidle(&pdev->dev); ··· 276 271 dev_dbg(&pdev->dev, "shutdown\n"); 277 272 mei_stop(dev); 278 273 279 - if (!pci_dev_run_wake(pdev)) 280 - mei_me_unset_pm_domain(dev); 274 + mei_me_unset_pm_domain(dev); 281 275 282 276 mei_disable_interrupts(dev); 283 277 free_irq(pdev->irq, dev); ··· 304 300 dev_dbg(&pdev->dev, "stop\n"); 305 301 mei_stop(dev); 306 302 307 - if (!pci_dev_run_wake(pdev)) 308 - mei_me_unset_pm_domain(dev); 303 + mei_me_unset_pm_domain(dev); 309 304 310 305 mei_disable_interrupts(dev); 311 306
+11 -19
drivers/misc/mei/pci-txe.c
··· 144 144 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; 145 145 146 146 /* 147 - * For not wake-able HW runtime pm framework 148 - * can't be used on pci device level. 149 - * Use domain runtime pm callbacks instead. 150 - */ 151 - if (!pci_dev_run_wake(pdev)) 152 - mei_txe_set_pm_domain(dev); 147 + * TXE maps runtime suspend/resume to own power gating states, 148 + * hence we need to go around native PCI runtime service which 149 + * eventually brings the device into D3cold/hot state. 150 + * But the TXE device cannot wake up from D3 unlike from own 151 + * power gating. To get around PCI device native runtime pm, 152 + * TXE uses runtime pm domain handlers which take precedence. 153 + */ 154 + mei_txe_set_pm_domain(dev); 153 155 154 156 pm_runtime_put_noidle(&pdev->dev); 155 157 ··· 188 186 dev_dbg(&pdev->dev, "shutdown\n"); 189 187 mei_stop(dev); 190 188 191 - if (!pci_dev_run_wake(pdev)) 192 - mei_txe_unset_pm_domain(dev); 189 + mei_txe_unset_pm_domain(dev); 193 190 194 191 mei_disable_interrupts(dev); 195 192 free_irq(pdev->irq, dev); ··· 216 215 217 216 mei_stop(dev); 218 217 219 - if (!pci_dev_run_wake(pdev)) 220 - mei_txe_unset_pm_domain(dev); 218 + mei_txe_unset_pm_domain(dev); 221 219 222 220 mei_disable_interrupts(dev); 223 221 free_irq(pdev->irq, dev); ··· 318 318 else 319 319 ret = -EAGAIN; 320 320 321 - /* 322 - * If everything is okay we're about to enter PCI low 323 - * power state (D3) therefor we need to disable the 324 - * interrupts towards host. 325 - * However if device is not wakeable we do not enter 326 - * D-low state and we need to keep the interrupt kicking 327 - */ 328 - if (!ret && pci_dev_run_wake(pdev)) 329 - mei_disable_interrupts(dev); 321 + /* keep irq on we are staying in D0 */ 330 322 331 323 dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); 332 324
+1 -1
include/linux/hyperv.h
··· 1403 1403 const int *srv_version, int srv_vercnt, 1404 1404 int *nego_fw_version, int *nego_srv_version); 1405 1405 1406 - void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); 1406 + void hv_process_channel_removal(u32 relid); 1407 1407 1408 1408 void vmbus_setevent(struct vmbus_channel *channel); 1409 1409 /*