Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'gve-add-rx-hw-timestamping-support'

Ziwei Xiao says:

====================
gve: Add Rx HW timestamping support

This patch series adds support for Rx HW timestamping, which sends
adminq commands periodically to the device for clock synchronization with
the NIC.

The ability to read the PHC from user space will be added in a
future patch series when the actual PTP support is added. For this patch
series, the initial PTP infrastructure is added, using ptp_schedule_worker
to schedule the work of syncing the NIC clock.
====================

Link: https://patch.msgid.link/20250614000754.164827-1-hramamurthy@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+396 -17
+1
drivers/net/ethernet/google/Kconfig
··· 18 18 config GVE 19 19 tristate "Google Virtual NIC (gVNIC) support" 20 20 depends on (PCI_MSI && (X86 || CPU_LITTLE_ENDIAN)) 21 + depends on PTP_1588_CLOCK_OPTIONAL 21 22 select PAGE_POOL 22 23 help 23 24 This driver supports Google Virtual NIC (gVNIC)"
+3 -1
drivers/net/ethernet/google/gve/Makefile
··· 1 1 # Makefile for the Google virtual Ethernet (gve) driver 2 2 3 3 obj-$(CONFIG_GVE) += gve.o 4 - gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \ 4 + gve-y := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \ 5 5 gve_buffer_mgmt_dqo.o 6 + 7 + gve-$(CONFIG_PTP_1588_CLOCK) += gve_ptp.o
+35
drivers/net/ethernet/google/gve/gve.h
··· 11 11 #include <linux/dmapool.h> 12 12 #include <linux/ethtool_netlink.h> 13 13 #include <linux/netdevice.h> 14 + #include <linux/net_tstamp.h> 14 15 #include <linux/pci.h> 16 + #include <linux/ptp_clock_kernel.h> 15 17 #include <linux/u64_stats_sync.h> 16 18 #include <net/page_pool/helpers.h> 17 19 #include <net/xdp.h> ··· 752 750 u32 *hash_lut; 753 751 }; 754 752 753 + struct gve_ptp { 754 + struct ptp_clock_info info; 755 + struct ptp_clock *clock; 756 + struct gve_priv *priv; 757 + }; 758 + 755 759 struct gve_priv { 756 760 struct net_device *dev; 757 761 struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */ ··· 821 813 u32 adminq_set_driver_parameter_cnt; 822 814 u32 adminq_report_stats_cnt; 823 815 u32 adminq_report_link_speed_cnt; 816 + u32 adminq_report_nic_timestamp_cnt; 824 817 u32 adminq_get_ptype_map_cnt; 825 818 u32 adminq_verify_driver_compatibility_cnt; 826 819 u32 adminq_query_flow_rules_cnt; ··· 879 870 u16 rss_lut_size; 880 871 bool cache_rss_config; 881 872 struct gve_rss_config rss_config; 873 + 874 + /* True if the device supports reading the nic clock */ 875 + bool nic_timestamp_supported; 876 + struct gve_ptp *ptp; 877 + struct kernel_hwtstamp_config ts_config; 878 + struct gve_nic_ts_report *nic_ts_report; 879 + dma_addr_t nic_ts_report_bus; 880 + u64 last_sync_nic_counter; /* Clock counter from last NIC TS report */ 882 881 }; 883 882 884 883 enum gve_service_task_flags_bit { ··· 1266 1249 int gve_flow_rules_reset(struct gve_priv *priv); 1267 1250 /* RSS config */ 1268 1251 int gve_init_rss_config(struct gve_priv *priv, u16 num_queues); 1252 + /* PTP and timestamping */ 1253 + #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) 1254 + int gve_clock_nic_ts_read(struct gve_priv *priv); 1255 + int gve_init_clock(struct gve_priv *priv); 1256 + void gve_teardown_clock(struct gve_priv *priv); 1257 + #else /* CONFIG_PTP_1588_CLOCK */ 1258 + static inline int gve_clock_nic_ts_read(struct gve_priv *priv) 1259 + { 1260 + return -EOPNOTSUPP; 1261 + } 1262 + 
1263 + static inline int gve_init_clock(struct gve_priv *priv) 1264 + { 1265 + return 0; 1266 + } 1267 + 1268 + static inline void gve_teardown_clock(struct gve_priv *priv) { } 1269 + #endif /* CONFIG_PTP_1588_CLOCK */ 1269 1270 /* report stats handling */ 1270 1271 void gve_handle_report_stats(struct gve_priv *priv); 1271 1272 /* exported by ethtool.c */
+86 -12
drivers/net/ethernet/google/gve/gve_adminq.c
··· 46 46 struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, 47 47 struct gve_device_option_flow_steering **dev_op_flow_steering, 48 48 struct gve_device_option_rss_config **dev_op_rss_config, 49 + struct gve_device_option_nic_timestamp **dev_op_nic_timestamp, 49 50 struct gve_device_option_modify_ring **dev_op_modify_ring) 50 51 { 51 52 u32 req_feat_mask = be32_to_cpu(option->required_features_mask); ··· 226 225 "RSS config"); 227 226 *dev_op_rss_config = (void *)(option + 1); 228 227 break; 228 + case GVE_DEV_OPT_ID_NIC_TIMESTAMP: 229 + if (option_length < sizeof(**dev_op_nic_timestamp) || 230 + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP) { 231 + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, 232 + "Nic Timestamp", 233 + (int)sizeof(**dev_op_nic_timestamp), 234 + GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP, 235 + option_length, req_feat_mask); 236 + break; 237 + } 238 + 239 + if (option_length > sizeof(**dev_op_nic_timestamp)) 240 + dev_warn(&priv->pdev->dev, 241 + GVE_DEVICE_OPTION_TOO_BIG_FMT, 242 + "Nic Timestamp"); 243 + *dev_op_nic_timestamp = (void *)(option + 1); 244 + break; 229 245 default: 230 246 /* If we don't recognize the option just continue 231 247 * without doing anything. 
··· 264 246 struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, 265 247 struct gve_device_option_flow_steering **dev_op_flow_steering, 266 248 struct gve_device_option_rss_config **dev_op_rss_config, 249 + struct gve_device_option_nic_timestamp **dev_op_nic_timestamp, 267 250 struct gve_device_option_modify_ring **dev_op_modify_ring) 268 251 { 269 252 const int num_options = be16_to_cpu(descriptor->num_device_options); ··· 288 269 dev_op_dqo_rda, dev_op_jumbo_frames, 289 270 dev_op_dqo_qpl, dev_op_buffer_sizes, 290 271 dev_op_flow_steering, dev_op_rss_config, 272 + dev_op_nic_timestamp, 291 273 dev_op_modify_ring); 292 274 dev_opt = next_opt; 293 275 } ··· 326 306 priv->adminq_set_driver_parameter_cnt = 0; 327 307 priv->adminq_report_stats_cnt = 0; 328 308 priv->adminq_report_link_speed_cnt = 0; 309 + priv->adminq_report_nic_timestamp_cnt = 0; 329 310 priv->adminq_get_ptype_map_cnt = 0; 330 311 priv->adminq_query_flow_rules_cnt = 0; 331 312 priv->adminq_cfg_flow_rule_cnt = 0; ··· 463 442 int tail, head; 464 443 int i; 465 444 445 + lockdep_assert_held(&priv->adminq_lock); 446 + 466 447 tail = ioread32be(&priv->reg_bar0->adminq_event_counter); 467 448 head = priv->adminq_prod_cnt; 468 449 ··· 490 467 return 0; 491 468 } 492 469 493 - /* This function is not threadsafe - the caller is responsible for any 494 - * necessary locks. 
495 - */ 496 470 static int gve_adminq_issue_cmd(struct gve_priv *priv, 497 471 union gve_adminq_command *cmd_orig) 498 472 { 499 473 union gve_adminq_command *cmd; 500 474 u32 opcode; 501 475 u32 tail; 476 + 477 + lockdep_assert_held(&priv->adminq_lock); 502 478 503 479 tail = ioread32be(&priv->reg_bar0->adminq_event_counter); 504 480 ··· 565 543 break; 566 544 case GVE_ADMINQ_REPORT_LINK_SPEED: 567 545 priv->adminq_report_link_speed_cnt++; 546 + break; 547 + case GVE_ADMINQ_REPORT_NIC_TIMESTAMP: 548 + priv->adminq_report_nic_timestamp_cnt++; 568 549 break; 569 550 case GVE_ADMINQ_GET_PTYPE_MAP: 570 551 priv->adminq_get_ptype_map_cnt++; ··· 734 709 int err; 735 710 int i; 736 711 712 + mutex_lock(&priv->adminq_lock); 713 + 737 714 for (i = start_id; i < start_id + num_queues; i++) { 738 715 err = gve_adminq_create_tx_queue(priv, i); 739 716 if (err) 740 - return err; 717 + goto out; 741 718 } 742 719 743 - return gve_adminq_kick_and_wait(priv); 720 + err = gve_adminq_kick_and_wait(priv); 721 + 722 + out: 723 + mutex_unlock(&priv->adminq_lock); 724 + return err; 744 725 } 745 726 746 727 static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv, ··· 819 788 int err; 820 789 int i; 821 790 791 + mutex_lock(&priv->adminq_lock); 792 + 822 793 for (i = 0; i < num_queues; i++) { 823 794 err = gve_adminq_create_rx_queue(priv, i); 824 795 if (err) 825 - return err; 796 + goto out; 826 797 } 827 798 828 - return gve_adminq_kick_and_wait(priv); 799 + err = gve_adminq_kick_and_wait(priv); 800 + 801 + out: 802 + mutex_unlock(&priv->adminq_lock); 803 + return err; 829 804 } 830 805 831 806 static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index) ··· 857 820 int err; 858 821 int i; 859 822 823 + mutex_lock(&priv->adminq_lock); 824 + 860 825 for (i = start_id; i < start_id + num_queues; i++) { 861 826 err = gve_adminq_destroy_tx_queue(priv, i); 862 827 if (err) 863 - return err; 828 + goto out; 864 829 } 865 830 866 - return 
gve_adminq_kick_and_wait(priv); 831 + err = gve_adminq_kick_and_wait(priv); 832 + 833 + out: 834 + mutex_unlock(&priv->adminq_lock); 835 + return err; 867 836 } 868 837 869 838 static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd, ··· 904 861 int err; 905 862 int i; 906 863 864 + mutex_lock(&priv->adminq_lock); 865 + 907 866 for (i = 0; i < num_queues; i++) { 908 867 err = gve_adminq_destroy_rx_queue(priv, i); 909 868 if (err) 910 - return err; 869 + goto out; 911 870 } 912 871 913 - return gve_adminq_kick_and_wait(priv); 872 + err = gve_adminq_kick_and_wait(priv); 873 + 874 + out: 875 + mutex_unlock(&priv->adminq_lock); 876 + return err; 914 877 } 915 878 916 879 static void gve_set_default_desc_cnt(struct gve_priv *priv, ··· 953 904 *dev_op_flow_steering, 954 905 const struct gve_device_option_rss_config 955 906 *dev_op_rss_config, 907 + const struct gve_device_option_nic_timestamp 908 + *dev_op_nic_timestamp, 956 909 const struct gve_device_option_modify_ring 957 910 *dev_op_modify_ring) 958 911 { ··· 1031 980 "RSS device option enabled with key size of %u, lut size of %u.\n", 1032 981 priv->rss_key_size, priv->rss_lut_size); 1033 982 } 983 + 984 + if (dev_op_nic_timestamp && 985 + (supported_features_mask & GVE_SUP_NIC_TIMESTAMP_MASK)) 986 + priv->nic_timestamp_supported = true; 1034 987 } 1035 988 1036 989 int gve_adminq_describe_device(struct gve_priv *priv) 1037 990 { 991 + struct gve_device_option_nic_timestamp *dev_op_nic_timestamp = NULL; 1038 992 struct gve_device_option_flow_steering *dev_op_flow_steering = NULL; 1039 993 struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL; 1040 994 struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL; ··· 1080 1024 &dev_op_buffer_sizes, 1081 1025 &dev_op_flow_steering, 1082 1026 &dev_op_rss_config, 1027 + &dev_op_nic_timestamp, 1083 1028 &dev_op_modify_ring); 1084 1029 if (err) 1085 1030 goto free_device_descriptor; ··· 1145 1088 gve_enable_supported_features(priv, 
supported_features_mask, 1146 1089 dev_op_jumbo_frames, dev_op_dqo_qpl, 1147 1090 dev_op_buffer_sizes, dev_op_flow_steering, 1148 - dev_op_rss_config, dev_op_modify_ring); 1091 + dev_op_rss_config, dev_op_nic_timestamp, 1092 + dev_op_modify_ring); 1149 1093 1150 1094 free_device_descriptor: 1151 1095 dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus); ··· 1256 1198 dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region, 1257 1199 link_speed_region_bus); 1258 1200 return err; 1201 + } 1202 + 1203 + int gve_adminq_report_nic_ts(struct gve_priv *priv, 1204 + dma_addr_t nic_ts_report_addr) 1205 + { 1206 + union gve_adminq_command cmd; 1207 + 1208 + memset(&cmd, 0, sizeof(cmd)); 1209 + cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_NIC_TIMESTAMP); 1210 + cmd.report_nic_ts = (struct gve_adminq_report_nic_ts) { 1211 + .nic_ts_report_len = 1212 + cpu_to_be64(sizeof(struct gve_nic_ts_report)), 1213 + .nic_ts_report_addr = cpu_to_be64(nic_ts_report_addr), 1214 + }; 1215 + 1216 + return gve_adminq_execute_cmd(priv, &cmd); 1259 1217 } 1260 1218 1261 1219 int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
+28
drivers/net/ethernet/google/gve/gve_adminq.h
··· 27 27 GVE_ADMINQ_GET_PTYPE_MAP = 0xE, 28 28 GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF, 29 29 GVE_ADMINQ_QUERY_FLOW_RULES = 0x10, 30 + GVE_ADMINQ_REPORT_NIC_TIMESTAMP = 0x11, 30 31 GVE_ADMINQ_QUERY_RSS = 0x12, 31 32 32 33 /* For commands that are larger than 56 bytes */ ··· 175 174 176 175 static_assert(sizeof(struct gve_device_option_rss_config) == 8); 177 176 177 + struct gve_device_option_nic_timestamp { 178 + __be32 supported_features_mask; 179 + }; 180 + 181 + static_assert(sizeof(struct gve_device_option_nic_timestamp) == 4); 182 + 178 183 /* Terminology: 179 184 * 180 185 * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA ··· 199 192 GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8, 200 193 GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa, 201 194 GVE_DEV_OPT_ID_FLOW_STEERING = 0xb, 195 + GVE_DEV_OPT_ID_NIC_TIMESTAMP = 0xd, 202 196 GVE_DEV_OPT_ID_RSS_CONFIG = 0xe, 203 197 }; 204 198 ··· 214 206 GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0, 215 207 GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0, 216 208 GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG = 0x0, 209 + GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP = 0x0, 217 210 }; 218 211 219 212 enum gve_sup_feature_mask { ··· 223 214 GVE_SUP_BUFFER_SIZES_MASK = 1 << 4, 224 215 GVE_SUP_FLOW_STEERING_MASK = 1 << 5, 225 216 GVE_SUP_RSS_CONFIG_MASK = 1 << 7, 217 + GVE_SUP_NIC_TIMESTAMP_MASK = 1 << 8, 226 218 }; 227 219 228 220 #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0 ··· 401 391 }; 402 392 403 393 static_assert(sizeof(struct gve_adminq_report_link_speed) == 8); 394 + 395 + struct gve_adminq_report_nic_ts { 396 + __be64 nic_ts_report_len; 397 + __be64 nic_ts_report_addr; 398 + }; 399 + 400 + static_assert(sizeof(struct gve_adminq_report_nic_ts) == 16); 401 + 402 + struct gve_nic_ts_report { 403 + __be64 nic_timestamp; /* NIC clock in nanoseconds */ 404 + __be64 reserved1; 405 + __be64 reserved2; 406 + __be64 reserved3; 407 + __be64 reserved4; 408 + }; 404 409 405 410 struct stats { 406 411 __be32 stat_name; ··· 610 585 struct 
gve_adminq_query_flow_rules query_flow_rules; 611 586 struct gve_adminq_configure_rss configure_rss; 612 587 struct gve_adminq_query_rss query_rss; 588 + struct gve_adminq_report_nic_ts report_nic_ts; 613 589 struct gve_adminq_extended_command extended_command; 614 590 }; 615 591 }; ··· 650 624 int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc); 651 625 int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh); 652 626 int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh); 627 + int gve_adminq_report_nic_ts(struct gve_priv *priv, 628 + dma_addr_t nic_ts_report_addr); 653 629 654 630 struct gve_ptype_lut; 655 631 int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
+2 -1
drivers/net/ethernet/google/gve/gve_desc_dqo.h
··· 247 247 }; 248 248 __le32 hash; 249 249 __le32 reserved6; 250 - __le64 reserved7; 250 + __le32 reserved7; 251 + __le32 ts; /* timestamp in nanosecs */ 251 252 } __packed; 252 253 253 254 static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32);
+24 -2
drivers/net/ethernet/google/gve/gve_ethtool.c
··· 76 76 "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt", 77 77 "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt", 78 78 "adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt", 79 - "adminq_query_rss_cnt", 79 + "adminq_query_rss_cnt", "adminq_report_nic_timestamp_cnt", 80 80 }; 81 81 82 82 static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = { ··· 456 456 data[i++] = priv->adminq_cfg_flow_rule_cnt; 457 457 data[i++] = priv->adminq_cfg_rss_cnt; 458 458 data[i++] = priv->adminq_query_rss_cnt; 459 + data[i++] = priv->adminq_report_nic_timestamp_cnt; 459 460 } 460 461 461 462 static void gve_get_channels(struct net_device *netdev, ··· 923 922 return 0; 924 923 } 925 924 925 + static int gve_get_ts_info(struct net_device *netdev, 926 + struct kernel_ethtool_ts_info *info) 927 + { 928 + struct gve_priv *priv = netdev_priv(netdev); 929 + 930 + ethtool_op_get_ts_info(netdev, info); 931 + 932 + if (priv->nic_timestamp_supported) { 933 + info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE | 934 + SOF_TIMESTAMPING_RAW_HARDWARE; 935 + 936 + info->rx_filters |= BIT(HWTSTAMP_FILTER_NONE) | 937 + BIT(HWTSTAMP_FILTER_ALL); 938 + 939 + if (priv->ptp) 940 + info->phc_index = ptp_clock_index(priv->ptp->clock); 941 + } 942 + 943 + return 0; 944 + } 945 + 926 946 const struct ethtool_ops gve_ethtool_ops = { 927 947 .supported_coalesce_params = ETHTOOL_COALESCE_USECS, 928 948 .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, ··· 972 950 .get_priv_flags = gve_get_priv_flags, 973 951 .set_priv_flags = gve_set_priv_flags, 974 952 .get_link_ksettings = gve_get_link_ksettings, 975 - .get_ts_info = ethtool_op_get_ts_info, 953 + .get_ts_info = gve_get_ts_info, 976 954 };
+52 -1
drivers/net/ethernet/google/gve/gve_main.c
··· 619 619 err = gve_alloc_counter_array(priv); 620 620 if (err) 621 621 goto abort_with_rss_config_cache; 622 - err = gve_alloc_notify_blocks(priv); 622 + err = gve_init_clock(priv); 623 623 if (err) 624 624 goto abort_with_counter; 625 + err = gve_alloc_notify_blocks(priv); 626 + if (err) 627 + goto abort_with_clock; 625 628 err = gve_alloc_stats_report(priv); 626 629 if (err) 627 630 goto abort_with_ntfy_blocks; ··· 677 674 gve_free_stats_report(priv); 678 675 abort_with_ntfy_blocks: 679 676 gve_free_notify_blocks(priv); 677 + abort_with_clock: 678 + gve_teardown_clock(priv); 680 679 abort_with_counter: 681 680 gve_free_counter_array(priv); 682 681 abort_with_rss_config_cache: ··· 727 722 gve_free_counter_array(priv); 728 723 gve_free_notify_blocks(priv); 729 724 gve_free_stats_report(priv); 725 + gve_teardown_clock(priv); 730 726 gve_clear_device_resources_ok(priv); 731 727 } 732 728 ··· 2048 2042 return err; 2049 2043 } 2050 2044 2045 + static int gve_get_ts_config(struct net_device *dev, 2046 + struct kernel_hwtstamp_config *kernel_config) 2047 + { 2048 + struct gve_priv *priv = netdev_priv(dev); 2049 + 2050 + *kernel_config = priv->ts_config; 2051 + return 0; 2052 + } 2053 + 2054 + static int gve_set_ts_config(struct net_device *dev, 2055 + struct kernel_hwtstamp_config *kernel_config, 2056 + struct netlink_ext_ack *extack) 2057 + { 2058 + struct gve_priv *priv = netdev_priv(dev); 2059 + 2060 + if (kernel_config->tx_type != HWTSTAMP_TX_OFF) { 2061 + NL_SET_ERR_MSG_MOD(extack, "TX timestamping is not supported"); 2062 + return -ERANGE; 2063 + } 2064 + 2065 + if (kernel_config->rx_filter != HWTSTAMP_FILTER_NONE) { 2066 + if (!priv->nic_ts_report) { 2067 + NL_SET_ERR_MSG_MOD(extack, 2068 + "RX timestamping is not supported"); 2069 + kernel_config->rx_filter = HWTSTAMP_FILTER_NONE; 2070 + return -EOPNOTSUPP; 2071 + } 2072 + 2073 + kernel_config->rx_filter = HWTSTAMP_FILTER_ALL; 2074 + gve_clock_nic_ts_read(priv); 2075 + ptp_schedule_worker(priv->ptp->clock, 0); 
2076 + } else { 2077 + ptp_cancel_worker_sync(priv->ptp->clock); 2078 + } 2079 + 2080 + priv->ts_config.rx_filter = kernel_config->rx_filter; 2081 + 2082 + return 0; 2083 + } 2084 + 2051 2085 static const struct net_device_ops gve_netdev_ops = { 2052 2086 .ndo_start_xmit = gve_start_xmit, 2053 2087 .ndo_features_check = gve_features_check, ··· 2099 2053 .ndo_bpf = gve_xdp, 2100 2054 .ndo_xdp_xmit = gve_xdp_xmit, 2101 2055 .ndo_xsk_wakeup = gve_xsk_wakeup, 2056 + .ndo_hwtstamp_get = gve_get_ts_config, 2057 + .ndo_hwtstamp_set = gve_set_ts_config, 2102 2058 }; 2103 2059 2104 2060 static void gve_handle_status(struct gve_priv *priv, u32 status) ··· 2319 2271 priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO; 2320 2272 priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO; 2321 2273 } 2274 + 2275 + priv->ts_config.tx_type = HWTSTAMP_TX_OFF; 2276 + priv->ts_config.rx_filter = HWTSTAMP_FILTER_NONE; 2322 2277 2323 2278 setup_device: 2324 2279 gve_set_netdev_xdp_features(priv);
+139
drivers/net/ethernet/google/gve/gve_ptp.c
··· 1 + // SPDX-License-Identifier: (GPL-2.0 OR MIT) 2 + /* Google virtual Ethernet (gve) driver 3 + * 4 + * Copyright (C) 2025 Google LLC 5 + */ 6 + 7 + #include "gve.h" 8 + #include "gve_adminq.h" 9 + 10 + /* Interval to schedule a nic timestamp calibration, 250ms. */ 11 + #define GVE_NIC_TS_SYNC_INTERVAL_MS 250 12 + 13 + /* Read the nic timestamp from hardware via the admin queue. */ 14 + int gve_clock_nic_ts_read(struct gve_priv *priv) 15 + { 16 + u64 nic_raw; 17 + int err; 18 + 19 + err = gve_adminq_report_nic_ts(priv, priv->nic_ts_report_bus); 20 + if (err) 21 + return err; 22 + 23 + nic_raw = be64_to_cpu(priv->nic_ts_report->nic_timestamp); 24 + WRITE_ONCE(priv->last_sync_nic_counter, nic_raw); 25 + 26 + return 0; 27 + } 28 + 29 + static long gve_ptp_do_aux_work(struct ptp_clock_info *info) 30 + { 31 + const struct gve_ptp *ptp = container_of(info, struct gve_ptp, info); 32 + struct gve_priv *priv = ptp->priv; 33 + int err; 34 + 35 + if (gve_get_reset_in_progress(priv) || !gve_get_admin_queue_ok(priv)) 36 + goto out; 37 + 38 + err = gve_clock_nic_ts_read(priv); 39 + if (err && net_ratelimit()) 40 + dev_err(&priv->pdev->dev, 41 + "%s read err %d\n", __func__, err); 42 + 43 + out: 44 + return msecs_to_jiffies(GVE_NIC_TS_SYNC_INTERVAL_MS); 45 + } 46 + 47 + static const struct ptp_clock_info gve_ptp_caps = { 48 + .owner = THIS_MODULE, 49 + .name = "gve clock", 50 + .do_aux_work = gve_ptp_do_aux_work, 51 + }; 52 + 53 + static int gve_ptp_init(struct gve_priv *priv) 54 + { 55 + struct gve_ptp *ptp; 56 + int err; 57 + 58 + if (!priv->nic_timestamp_supported) { 59 + dev_dbg(&priv->pdev->dev, "Device does not support PTP\n"); 60 + return -EOPNOTSUPP; 61 + } 62 + 63 + priv->ptp = kzalloc(sizeof(*priv->ptp), GFP_KERNEL); 64 + if (!priv->ptp) 65 + return -ENOMEM; 66 + 67 + ptp = priv->ptp; 68 + ptp->info = gve_ptp_caps; 69 + ptp->clock = ptp_clock_register(&ptp->info, &priv->pdev->dev); 70 + 71 + if (IS_ERR(ptp->clock)) { 72 + dev_err(&priv->pdev->dev, "PTP clock 
registration failed\n"); 73 + err = PTR_ERR(ptp->clock); 74 + goto free_ptp; 75 + } 76 + 77 + ptp->priv = priv; 78 + return 0; 79 + 80 + free_ptp: 81 + kfree(ptp); 82 + priv->ptp = NULL; 83 + return err; 84 + } 85 + 86 + static void gve_ptp_release(struct gve_priv *priv) 87 + { 88 + struct gve_ptp *ptp = priv->ptp; 89 + 90 + if (!ptp) 91 + return; 92 + 93 + if (ptp->clock) 94 + ptp_clock_unregister(ptp->clock); 95 + 96 + kfree(ptp); 97 + priv->ptp = NULL; 98 + } 99 + 100 + int gve_init_clock(struct gve_priv *priv) 101 + { 102 + int err; 103 + 104 + if (!priv->nic_timestamp_supported) 105 + return 0; 106 + 107 + err = gve_ptp_init(priv); 108 + if (err) 109 + return err; 110 + 111 + priv->nic_ts_report = 112 + dma_alloc_coherent(&priv->pdev->dev, 113 + sizeof(struct gve_nic_ts_report), 114 + &priv->nic_ts_report_bus, 115 + GFP_KERNEL); 116 + if (!priv->nic_ts_report) { 117 + dev_err(&priv->pdev->dev, "%s dma alloc error\n", __func__); 118 + err = -ENOMEM; 119 + goto release_ptp; 120 + } 121 + 122 + return 0; 123 + 124 + release_ptp: 125 + gve_ptp_release(priv); 126 + return err; 127 + } 128 + 129 + void gve_teardown_clock(struct gve_priv *priv) 130 + { 131 + gve_ptp_release(priv); 132 + 133 + if (priv->nic_ts_report) { 134 + dma_free_coherent(&priv->pdev->dev, 135 + sizeof(struct gve_nic_ts_report), 136 + priv->nic_ts_report, priv->nic_ts_report_bus); 137 + priv->nic_ts_report = NULL; 138 + } 139 + }
+26
drivers/net/ethernet/google/gve/gve_rx_dqo.c
··· 437 437 skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type); 438 438 } 439 439 440 + /* Expand the hardware timestamp to the full 64 bits of width, and add it to the 441 + * skb. 442 + * 443 + * This algorithm works by using the passed hardware timestamp to generate a 444 + * diff relative to the last read of the nic clock. This diff can be positive or 445 + * negative, as it is possible that we have read the clock more recently than 446 + * the hardware has received this packet. To detect this, we use the high bit of 447 + * the diff, and assume that the read is more recent if the high bit is set. In 448 + * this case we invert the process. 449 + * 450 + * Note that this means if the time delta between packet reception and the last 451 + * clock read is greater than ~2 seconds, this will provide invalid results. 452 + */ 453 + static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx, u32 hwts) 454 + { 455 + u64 last_read = READ_ONCE(rx->gve->last_sync_nic_counter); 456 + struct sk_buff *skb = rx->ctx.skb_head; 457 + u32 low = (u32)last_read; 458 + s32 diff = hwts - low; 459 + 460 + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff); 461 + } 462 + 440 463 static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx) 441 464 { 442 465 if (!rx->ctx.skb_head) ··· 789 766 790 767 if (feat & NETIF_F_RXCSUM) 791 768 gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype); 769 + 770 + if (rx->gve->ts_config.rx_filter == HWTSTAMP_FILTER_ALL) 771 + gve_rx_skb_hwtstamp(rx, le32_to_cpu(desc->ts)); 792 772 793 773 /* RSC packets must set gso_size otherwise the TCP stack will complain 794 774 * that packets are larger than MTU.