Update to v4.17.3 kernel headers.

Test: Builds, boots on a walleye.
Change-Id: I389d8b61ec00ea309e38d1b1a2e0dace48c21edb
diff --git a/libc/kernel/uapi/rdma/mlx5-abi.h b/libc/kernel/uapi/rdma/mlx5-abi.h
index 9347cf3..170c0a6 100644
--- a/libc/kernel/uapi/rdma/mlx5-abi.h
+++ b/libc/kernel/uapi/rdma/mlx5-abi.h
@@ -24,6 +24,9 @@
   MLX5_QP_FLAG_SIGNATURE = 1 << 0,
   MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
   MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2,
+  MLX5_QP_FLAG_BFREG_INDEX = 1 << 3,
+  MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
+  MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
 };
 enum {
   MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
@@ -48,7 +51,7 @@
   __u8 reserved0;
   __u16 reserved1;
   __u32 reserved2;
-  __u64 lib_caps;
+  __aligned_u64 lib_caps;
 };
 enum mlx5_ib_alloc_ucontext_resp_mask {
   MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
@@ -64,6 +67,13 @@
   MLX5_USER_INLINE_MODE_IP,
   MLX5_USER_INLINE_MODE_TCP_UDP,
 };
+enum {
+  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
+  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
+  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
+  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
+  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
+};
 struct mlx5_ib_alloc_ucontext_resp {
   __u32 qp_tab_size;
   __u32 bf_reg_size;
@@ -75,16 +85,18 @@
   __u32 max_recv_wr;
   __u32 max_srq_recv_wr;
   __u16 num_ports;
-  __u16 reserved1;
+  __u16 flow_action_flags;
   __u32 comp_mask;
   __u32 response_length;
   __u8 cqe_version;
   __u8 cmds_supp_uhw;
   __u8 eth_min_inline;
-  __u8 reserved2;
-  __u64 hca_core_clock_offset;
+  __u8 clock_info_versions;
+  __aligned_u64 hca_core_clock_offset;
   __u32 log_uar_size;
   __u32 num_uars_per_page;
+  __u32 num_dyn_bfregs;
+  __u32 reserved3;
 };
 struct mlx5_ib_alloc_pd_resp {
   __u32 pdn;
@@ -94,7 +106,7 @@
   __u32 supported_qpts;
 };
 struct mlx5_ib_rss_caps {
-  __u64 rx_hash_fields_mask;
+  __aligned_u64 rx_hash_fields_mask;
   __u8 rx_hash_function;
   __u8 reserved[7];
 };
@@ -107,11 +119,15 @@
   __u32 max_num;
   __u32 supported_format;
 };
+enum mlx5_ib_packet_pacing_cap_flags {
+  MLX5_IB_PP_SUPPORT_BURST = 1 << 0,
+};
 struct mlx5_packet_pacing_caps {
   __u32 qp_rate_limit_min;
   __u32 qp_rate_limit_max;
   __u32 supported_qpts;
-  __u32 reserved;
+  __u8 cap_flags;
+  __u8 reserved[3];
 };
 enum mlx5_ib_mpw_caps {
   MPW_RESERVED = 1 << 0,
@@ -162,8 +178,8 @@
   MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0,
 };
 struct mlx5_ib_create_cq {
-  __u64 buf_addr;
-  __u64 db_addr;
+  __aligned_u64 buf_addr;
+  __aligned_u64 db_addr;
   __u32 cqe_size;
   __u8 cqe_comp_en;
   __u8 cqe_comp_res_format;
@@ -174,14 +190,14 @@
   __u32 reserved;
 };
 struct mlx5_ib_resize_cq {
-  __u64 buf_addr;
+  __aligned_u64 buf_addr;
   __u16 cqe_size;
   __u16 reserved0;
   __u32 reserved1;
 };
 struct mlx5_ib_create_srq {
-  __u64 buf_addr;
-  __u64 db_addr;
+  __aligned_u64 buf_addr;
+  __aligned_u64 db_addr;
   __u32 flags;
   __u32 reserved0;
   __u32 uidx;
@@ -192,15 +208,18 @@
   __u32 reserved;
 };
 struct mlx5_ib_create_qp {
-  __u64 buf_addr;
-  __u64 db_addr;
+  __aligned_u64 buf_addr;
+  __aligned_u64 db_addr;
   __u32 sq_wqe_count;
   __u32 rq_wqe_count;
   __u32 rq_wqe_shift;
   __u32 flags;
   __u32 uidx;
-  __u32 reserved0;
-  __u64 sq_buf_addr;
+  __u32 bfreg_index;
+  union {
+    __aligned_u64 sq_buf_addr;
+    __aligned_u64 access_key;
+  };
 };
 enum mlx5_rx_hash_function_flags {
   MLX5_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
@@ -214,10 +233,11 @@
   MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
   MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
   MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
-  MLX5_RX_HASH_INNER = 1 << 31
+  MLX5_RX_HASH_IPSEC_SPI = 1 << 8,
+  MLX5_RX_HASH_INNER = (1UL << 31),
 };
 struct mlx5_ib_create_qp_rss {
-  __u64 rx_hash_fields_mask;
+  __aligned_u64 rx_hash_fields_mask;
   __u8 rx_hash_function;
   __u8 rx_key_len;
   __u8 reserved[6];
@@ -227,6 +247,7 @@
 };
 struct mlx5_ib_create_qp_resp {
   __u32 bfreg_index;
+  __u32 reserved;
 };
 struct mlx5_ib_alloc_mw {
   __u32 comp_mask;
@@ -238,8 +259,8 @@
   MLX5_IB_CREATE_WQ_STRIDING_RQ = (1 << 0),
 };
 struct mlx5_ib_create_wq {
-  __u64 buf_addr;
-  __u64 db_addr;
+  __aligned_u64 buf_addr;
+  __aligned_u64 db_addr;
   __u32 rq_wqe_count;
   __u32 rq_wqe_shift;
   __u32 user_index;
@@ -254,6 +275,20 @@
   __u8 dmac[ETH_ALEN];
   __u8 reserved[6];
 };
+struct mlx5_ib_burst_info {
+  __u32 max_burst_sz;
+  __u16 typical_pkt_sz;
+  __u16 reserved;
+};
+struct mlx5_ib_modify_qp {
+  __u32 comp_mask;
+  struct mlx5_ib_burst_info burst_info;
+  __u32 reserved;
+};
+struct mlx5_ib_modify_qp_resp {
+  __u32 response_length;
+  __u32 dctn;
+};
 struct mlx5_ib_create_wq_resp {
   __u32 response_length;
   __u32 reserved;
@@ -266,4 +301,31 @@
   __u32 comp_mask;
   __u32 reserved;
 };
+struct mlx5_ib_clock_info {
+  __u32 sign;
+  __u32 resv;
+  __aligned_u64 nsec;
+  __aligned_u64 cycles;
+  __aligned_u64 frac;
+  __u32 mult;
+  __u32 shift;
+  __aligned_u64 mask;
+  __aligned_u64 overflow_period;
+};
+enum mlx5_ib_mmap_cmd {
+  MLX5_IB_MMAP_REGULAR_PAGE = 0,
+  MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
+  MLX5_IB_MMAP_WC_PAGE = 2,
+  MLX5_IB_MMAP_NC_PAGE = 3,
+  MLX5_IB_MMAP_CORE_CLOCK = 5,
+  MLX5_IB_MMAP_ALLOC_WC = 6,
+  MLX5_IB_MMAP_CLOCK_INFO = 7,
+  MLX5_IB_MMAP_DEVICE_MEM = 8,
+};
+enum {
+  MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
+};
+enum {
+  MLX5_IB_CLOCK_INFO_V1 = 0,
+};
 #endif
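
Note on the new create-QP layout (a minimal, hypothetical sketch, not part of the generated header itself): this update gives struct mlx5_ib_create_qp a bfreg_index field and an anonymous union in which access_key overlays sq_buf_addr, alongside the new MLX5_QP_FLAG_TYPE_DCT / MLX5_QP_FLAG_TYPE_DCI flags. The fragment below only illustrates how a caller such as rdma-core's mlx5 provider might fill the driver-private portion of a create-QP command for a DCT QP; the helper name and calling convention are invented for the example, and only the types and flags come from this header.

  #include <string.h>
  #include <rdma/mlx5-abi.h>

  /* Hypothetical helper: builds the driver-private create-QP command for a
   * DCT QP. Real code lives in the rdma-core mlx5 provider; this only shows
   * the fields added by this header update. */
  static struct mlx5_ib_create_qp make_dct_create_cmd(__u64 dc_access_key,
                                                      __u32 uidx)
  {
      struct mlx5_ib_create_qp cmd;

      memset(&cmd, 0, sizeof(cmd));
      cmd.flags = MLX5_QP_FLAG_TYPE_DCT;  /* new flag in this update */
      cmd.uidx = uidx;
      /* access_key shares storage with sq_buf_addr via the new union. */
      cmd.access_key = dc_access_key;
      return cmd;
  }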