Diffstat (limited to 'include')
475 files changed, 14809 insertions, 4550 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index b2e377b7f337..aad1a95e6863 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -43,9 +43,6 @@ acpi_status acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code, struct acpi_buffer *status_buf); -acpi_status -acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld); - bool acpi_has_method(acpi_handle handle, char *name); acpi_status acpi_execute_simple_method(acpi_handle handle, char *method, u64 arg); @@ -60,6 +57,9 @@ bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs); union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4); #ifdef CONFIG_ACPI +bool +acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld); + static inline union acpi_object * acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4, @@ -1003,6 +1003,23 @@ static inline int unregister_acpi_bus_type(void *bus) { return 0; } static inline int acpi_wait_for_acpi_ipmi(void) { return 0; } +static inline const char *acpi_device_hid(struct acpi_device *device) +{ + return ""; +} + +static inline bool +acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld) +{ + return false; +} + +#define for_each_acpi_consumer_dev(supplier, consumer) \ + for (consumer = NULL; false && (supplier);) + +#define for_each_acpi_dev_match(adev, hid, uid, hrv) \ + for (adev = NULL; false && (hid) && (uid) && (hrv); ) + #endif /* CONFIG_ACPI */ #endif /*__ACPI_BUS_H__*/ diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h index b5f594754a9e..99b960bd473c 100644 --- a/include/acpi/acpi_numa.h +++ b/include/acpi/acpi_numa.h @@ -17,11 +17,16 @@ extern int node_to_pxm(int); extern int acpi_map_pxm_to_node(int); extern unsigned char acpi_srat_revision; extern void disable_srat(void); +extern int fix_pxm_node_maps(int max_nid); extern void bad_srat(void); extern int srat_disabled(void); #else /* CONFIG_ACPI_NUMA */ +static inline int fix_pxm_node_maps(int max_nid) +{ + return 0; +} static inline void disable_srat(void) { } diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h index 9d0479f50f97..5db59a1efb65 100644 --- a/include/asm-generic/early_ioremap.h +++ b/include/asm-generic/early_ioremap.h @@ -35,7 +35,7 @@ extern void early_ioremap_reset(void); /* * Early copy from unmapped memory to kernel mapped memory. */ -extern void copy_from_early_mem(void *dest, phys_addr_t src, +extern int copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size); #else diff --git a/include/asm-generic/fprobe.h b/include/asm-generic/fprobe.h new file mode 100644 index 000000000000..8659a4dc6eb6 --- /dev/null +++ b/include/asm-generic/fprobe.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Generic arch dependent fprobe macros. + */ +#ifndef __ASM_GENERIC_FPROBE_H__ +#define __ASM_GENERIC_FPROBE_H__ + +#include <linux/bits.h> + +#ifdef CONFIG_64BIT +/* + * Encoding the size and the address of fprobe into one 64bit entry. + * The 32bit architectures should use 2 entries to store those info. + */ + +#define ARCH_DEFINE_ENCODE_FPROBE_HEADER + +#define FPROBE_HEADER_MSB_SIZE_SHIFT (BITS_PER_LONG - FPROBE_DATA_SIZE_BITS) +#define FPROBE_HEADER_MSB_MASK \ + GENMASK(FPROBE_HEADER_MSB_SIZE_SHIFT - 1, 0) + +/* + * By default, this expects the MSBs in the address of kprobe is 0xf. 
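A compilable userspace sketch of the encode/decode round trip defined by these macros (including the ones just below) may help. It is illustrative only: it assumes BITS_PER_LONG == 64 and a hypothetical FPROBE_DATA_SIZE_BITS of 8 (the real value is provided by the kernel's fprobe headers), and uses a plain uint64_t standing in for the struct fprobe pointer:

#include <stdint.h>
#include <assert.h>

#define SIZE_BITS 8                            /* hypothetical FPROBE_DATA_SIZE_BITS */
#define SHIFT     (64 - SIZE_BITS)             /* FPROBE_HEADER_MSB_SIZE_SHIFT */
#define MASK      (((uint64_t)1 << SHIFT) - 1) /* FPROBE_HEADER_MSB_MASK */
#define PATTERN   (~MASK)                      /* FPROBE_HEADER_MSB_PATTERN (all-ones MSBs) */

int main(void)
{
	uint64_t fp = 0xffffffff12345678ULL;   /* kernel pointer whose MSBs are all ones */
	uint64_t size = 2;

	/* arch_fprobe_header_encodable(): the MSBs must match the fixed pattern */
	assert((fp & ~MASK) == PATTERN);

	/* arch_encode_fprobe_header(): the size replaces the redundant MSBs */
	uint64_t val = (fp & MASK) | (size << SHIFT);

	/* arch_decode_fprobe_header_size() / _fp(): both fields are recoverable */
	assert(val >> SHIFT == size);
	assert(((val & MASK) | PATTERN) == fp);
	return 0;
}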
+ * If any arch needs another fixed pattern (e.g. s390 is zero filled), + * override this. + */ +#define FPROBE_HEADER_MSB_PATTERN \ + GENMASK(BITS_PER_LONG - 1, FPROBE_HEADER_MSB_SIZE_SHIFT) + +#define arch_fprobe_header_encodable(fp) \ + (((unsigned long)(fp) & ~FPROBE_HEADER_MSB_MASK) == \ + FPROBE_HEADER_MSB_PATTERN) + +#define arch_encode_fprobe_header(fp, size) \ + (((unsigned long)(fp) & FPROBE_HEADER_MSB_MASK) | \ + ((unsigned long)(size) << FPROBE_HEADER_MSB_SIZE_SHIFT)) + +#define arch_decode_fprobe_header_size(val) \ + ((unsigned long)(val) >> FPROBE_HEADER_MSB_SIZE_SHIFT) + +#define arch_decode_fprobe_header_fp(val) \ + ((struct fprobe *)(((unsigned long)(val) & FPROBE_HEADER_MSB_MASK) | \ + FPROBE_HEADER_MSB_PATTERN)) +#endif /* CONFIG_64BIT */ + +#endif /* __ASM_GENERIC_FPROBE_H__ */ diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h deleted file mode 100644 index 814207e7c37f..000000000000 --- a/include/asm-generic/hyperv-tlfs.h +++ /dev/null @@ -1,874 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -/* - * This file contains definitions from Hyper-V Hypervisor Top-Level Functional - * Specification (TLFS): - * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs - */ - -#ifndef _ASM_GENERIC_HYPERV_TLFS_H -#define _ASM_GENERIC_HYPERV_TLFS_H - -#include <linux/types.h> -#include <linux/bits.h> -#include <linux/time64.h> - -/* - * While not explicitly listed in the TLFS, Hyper-V always runs with a page size - * of 4096. These definitions are used when communicating with Hyper-V using - * guest physical pages and guest physical page addresses, since the guest page - * size may not be 4096 on all architectures. - */ -#define HV_HYP_PAGE_SHIFT 12 -#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT) -#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1)) - -/* - * Hyper-V provides two categories of flags relevant to guest VMs. The - * "Features" category indicates specific functionality that is available - * to guests on this particular instance of Hyper-V. The "Features" - * are presented in four groups, each of which is 32 bits. The group A - * and B definitions are common across architectures and are listed here. - * However, not all flags are relevant on all architectures. - * - * Groups C and D vary across architectures and are listed in the - * architecture specific portion of hyperv-tlfs.h. Some of these flags exist - * on multiple architectures, but the bit positions are different so they - * cannot appear in the generic portion of hyperv-tlfs.h. - * - * The "Enlightenments" category provides recommendations on whether to use - * specific enlightenments that are available. The Enlighenments are a single - * group of 32 bits, but they vary across architectures and are listed in - * the architecture specific portion of hyperv-tlfs.h. - */ - -/* - * Group A Features. 
- */ - -/* VP Runtime register available */ -#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0) -/* Partition Reference Counter available*/ -#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1) -/* Basic SynIC register available */ -#define HV_MSR_SYNIC_AVAILABLE BIT(2) -/* Synthetic Timer registers available */ -#define HV_MSR_SYNTIMER_AVAILABLE BIT(3) -/* Virtual APIC assist and VP assist page registers available */ -#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4) -/* Hypercall and Guest OS ID registers available*/ -#define HV_MSR_HYPERCALL_AVAILABLE BIT(5) -/* Access virtual processor index register available*/ -#define HV_MSR_VP_INDEX_AVAILABLE BIT(6) -/* Virtual system reset register available*/ -#define HV_MSR_RESET_AVAILABLE BIT(7) -/* Access statistics page registers available */ -#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8) -/* Partition reference TSC register is available */ -#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9) -/* Partition Guest IDLE register is available */ -#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10) -/* Partition local APIC and TSC frequency registers available */ -#define HV_ACCESS_FREQUENCY_MSRS BIT(11) -/* AccessReenlightenmentControls privilege */ -#define HV_ACCESS_REENLIGHTENMENT BIT(13) -/* AccessTscInvariantControls privilege */ -#define HV_ACCESS_TSC_INVARIANT BIT(15) - -/* - * Group B features. - */ -#define HV_CREATE_PARTITIONS BIT(0) -#define HV_ACCESS_PARTITION_ID BIT(1) -#define HV_ACCESS_MEMORY_POOL BIT(2) -#define HV_ADJUST_MESSAGE_BUFFERS BIT(3) -#define HV_POST_MESSAGES BIT(4) -#define HV_SIGNAL_EVENTS BIT(5) -#define HV_CREATE_PORT BIT(6) -#define HV_CONNECT_PORT BIT(7) -#define HV_ACCESS_STATS BIT(8) -#define HV_DEBUGGING BIT(11) -#define HV_CPU_MANAGEMENT BIT(12) -#define HV_ENABLE_EXTENDED_HYPERCALLS BIT(20) -#define HV_ISOLATION BIT(22) - -/* - * TSC page layout. - */ -struct ms_hyperv_tsc_page { - volatile u32 tsc_sequence; - u32 reserved1; - volatile u64 tsc_scale; - volatile s64 tsc_offset; -} __packed; - -union hv_reference_tsc_msr { - u64 as_uint64; - struct { - u64 enable:1; - u64 reserved:11; - u64 pfn:52; - } __packed; -}; - -/* - * The guest OS needs to register the guest ID with the hypervisor. - * The guest ID is a 64 bit entity and the structure of this ID is - * specified in the Hyper-V specification: - * - * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx - * - * While the current guideline does not specify how Linux guest ID(s) - * need to be generated, our plan is to publish the guidelines for - * Linux and other guest operating systems that currently are hosted - * on Hyper-V. The implementation here conforms to this yet - * unpublished guidelines. - * - * - * Bit(s) - * 63 - Indicates if the OS is Open Source or not; 1 is Open Source - * 62:56 - Os Type; Linux is 0x100 - * 55:48 - Distro specific identification - * 47:16 - Linux kernel version number - * 15:0 - Distro specific identification - * - * - */ - -#define HV_LINUX_VENDOR_ID 0x8100 - -/* - * Crash notification flags. - */ -#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62) -#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63) - -/* Declare the various hypercall operations. 
*/ -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003 -#define HVCALL_ENABLE_VP_VTL 0x000f -#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008 -#define HVCALL_SEND_IPI 0x000b -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013 -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014 -#define HVCALL_SEND_IPI_EX 0x0015 -#define HVCALL_GET_PARTITION_ID 0x0046 -#define HVCALL_DEPOSIT_MEMORY 0x0048 -#define HVCALL_CREATE_VP 0x004e -#define HVCALL_GET_VP_REGISTERS 0x0050 -#define HVCALL_SET_VP_REGISTERS 0x0051 -#define HVCALL_POST_MESSAGE 0x005c -#define HVCALL_SIGNAL_EVENT 0x005d -#define HVCALL_POST_DEBUG_DATA 0x0069 -#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a -#define HVCALL_RESET_DEBUG_SESSION 0x006b -#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076 -#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c -#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d -#define HVCALL_RETARGET_INTERRUPT 0x007e -#define HVCALL_START_VP 0x0099 -#define HVCALL_GET_VP_ID_FROM_APIC_ID 0x009a -#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af -#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0 -#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db -#define HVCALL_MMIO_READ 0x0106 -#define HVCALL_MMIO_WRITE 0x0107 - -/* Extended hypercalls */ -#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001 -#define HV_EXT_CALL_MEMORY_HEAT_HINT 0x8003 - -#define HV_FLUSH_ALL_PROCESSORS BIT(0) -#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1) -#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2) -#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3) - -/* Extended capability bits */ -#define HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT BIT(8) - -enum HV_GENERIC_SET_FORMAT { - HV_GENERIC_SET_SPARSE_4K, - HV_GENERIC_SET_ALL, -}; - -#define HV_PARTITION_ID_SELF ((u64)-1) -#define HV_VP_INDEX_SELF ((u32)-2) - -#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0) -#define HV_HYPERCALL_FAST_BIT BIT(16) -#define HV_HYPERCALL_VARHEAD_OFFSET 17 -#define HV_HYPERCALL_VARHEAD_MASK GENMASK_ULL(26, 17) -#define HV_HYPERCALL_RSVD0_MASK GENMASK_ULL(31, 27) -#define HV_HYPERCALL_NESTED BIT_ULL(31) -#define HV_HYPERCALL_REP_COMP_OFFSET 32 -#define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32) -#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32) -#define HV_HYPERCALL_RSVD1_MASK GENMASK_ULL(47, 44) -#define HV_HYPERCALL_REP_START_OFFSET 48 -#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48) -#define HV_HYPERCALL_RSVD2_MASK GENMASK_ULL(63, 60) -#define HV_HYPERCALL_RSVD_MASK (HV_HYPERCALL_RSVD0_MASK | \ - HV_HYPERCALL_RSVD1_MASK | \ - HV_HYPERCALL_RSVD2_MASK) - -/* hypercall status code */ -#define HV_STATUS_SUCCESS 0 -#define HV_STATUS_INVALID_HYPERCALL_CODE 2 -#define HV_STATUS_INVALID_HYPERCALL_INPUT 3 -#define HV_STATUS_INVALID_ALIGNMENT 4 -#define HV_STATUS_INVALID_PARAMETER 5 -#define HV_STATUS_ACCESS_DENIED 6 -#define HV_STATUS_OPERATION_DENIED 8 -#define HV_STATUS_INSUFFICIENT_MEMORY 11 -#define HV_STATUS_INVALID_PORT_ID 17 -#define HV_STATUS_INVALID_CONNECTION_ID 18 -#define HV_STATUS_INSUFFICIENT_BUFFERS 19 -#define HV_STATUS_TIME_OUT 120 -#define HV_STATUS_VTL_ALREADY_ENABLED 134 - -/* - * The Hyper-V TimeRefCount register and the TSC - * page provide a guest VM clock with 100ns tick rate - */ -#define HV_CLOCK_HZ (NSEC_PER_SEC/100) - -/* Define the number of synthetic interrupt sources. */ -#define HV_SYNIC_SINT_COUNT (16) -/* Define the expected SynIC version. */ -#define HV_SYNIC_VERSION_1 (0x1) -/* Valid SynIC vectors are 16-255. 
*/ -#define HV_SYNIC_FIRST_VALID_VECTOR (16) - -#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0) -#define HV_SYNIC_SIMP_ENABLE (1ULL << 0) -#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0) -#define HV_SYNIC_SINT_MASKED (1ULL << 16) -#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17) -#define HV_SYNIC_SINT_VECTOR_MASK (0xFF) - -#define HV_SYNIC_STIMER_COUNT (4) - -/* Define synthetic interrupt controller message constants. */ -#define HV_MESSAGE_SIZE (256) -#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240) -#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30) - -/* - * Define hypervisor message types. Some of the message types - * are x86/x64 specific, but there's no good way to separate - * them out into the arch-specific version of hyperv-tlfs.h - * because C doesn't provide a way to extend enum types. - * Keeping them all in the arch neutral hyperv-tlfs.h seems - * the least messy compromise. - */ -enum hv_message_type { - HVMSG_NONE = 0x00000000, - - /* Memory access messages. */ - HVMSG_UNMAPPED_GPA = 0x80000000, - HVMSG_GPA_INTERCEPT = 0x80000001, - - /* Timer notification messages. */ - HVMSG_TIMER_EXPIRED = 0x80000010, - - /* Error messages. */ - HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020, - HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021, - HVMSG_UNSUPPORTED_FEATURE = 0x80000022, - - /* Trace buffer complete messages. */ - HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040, - - /* Platform-specific processor intercept messages. */ - HVMSG_X64_IOPORT_INTERCEPT = 0x80010000, - HVMSG_X64_MSR_INTERCEPT = 0x80010001, - HVMSG_X64_CPUID_INTERCEPT = 0x80010002, - HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003, - HVMSG_X64_APIC_EOI = 0x80010004, - HVMSG_X64_LEGACY_FP_ERROR = 0x80010005 -}; - -/* Define synthetic interrupt controller message flags. */ -union hv_message_flags { - __u8 asu8; - struct { - __u8 msg_pending:1; - __u8 reserved:7; - } __packed; -}; - -/* Define port identifier type. */ -union hv_port_id { - __u32 asu32; - struct { - __u32 id:24; - __u32 reserved:8; - } __packed u; -}; - -/* Define synthetic interrupt controller message header. */ -struct hv_message_header { - __u32 message_type; - __u8 payload_size; - union hv_message_flags message_flags; - __u8 reserved[2]; - union { - __u64 sender; - union hv_port_id port; - }; -} __packed; - -/* Define synthetic interrupt controller message format. */ -struct hv_message { - struct hv_message_header header; - union { - __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; - } u; -} __packed; - -/* Define the synthetic interrupt message page layout. */ -struct hv_message_page { - struct hv_message sint_message[HV_SYNIC_SINT_COUNT]; -} __packed; - -/* Define timer message payload structure. */ -struct hv_timer_message_payload { - __u32 timer_index; - __u32 reserved; - __u64 expiration_time; /* When the timer expired */ - __u64 delivery_time; /* When the message was delivered */ -} __packed; - - -/* Define synthetic interrupt controller flag constants. */ -#define HV_EVENT_FLAGS_COUNT (256 * 8) -#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long)) - -/* - * Synthetic timer configuration. - */ -union hv_stimer_config { - u64 as_uint64; - struct { - u64 enable:1; - u64 periodic:1; - u64 lazy:1; - u64 auto_enable:1; - u64 apic_vector:8; - u64 direct_mode:1; - u64 reserved_z0:3; - u64 sintx:4; - u64 reserved_z1:44; - } __packed; -}; - - -/* Define the synthetic interrupt controller event flags format. */ -union hv_synic_event_flags { - unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT]; -}; - -/* Define SynIC control register. 
*/ -union hv_synic_scontrol { - u64 as_uint64; - struct { - u64 enable:1; - u64 reserved:63; - } __packed; -}; - -/* Define synthetic interrupt source. */ -union hv_synic_sint { - u64 as_uint64; - struct { - u64 vector:8; - u64 reserved1:8; - u64 masked:1; - u64 auto_eoi:1; - u64 polling:1; - u64 reserved2:45; - } __packed; -}; - -/* Define the format of the SIMP register */ -union hv_synic_simp { - u64 as_uint64; - struct { - u64 simp_enabled:1; - u64 preserved:11; - u64 base_simp_gpa:52; - } __packed; -}; - -/* Define the format of the SIEFP register */ -union hv_synic_siefp { - u64 as_uint64; - struct { - u64 siefp_enabled:1; - u64 preserved:11; - u64 base_siefp_gpa:52; - } __packed; -}; - -struct hv_vpset { - u64 format; - u64 valid_bank_mask; - u64 bank_contents[]; -} __packed; - -/* The maximum number of sparse vCPU banks which can be encoded by 'struct hv_vpset' */ -#define HV_MAX_SPARSE_VCPU_BANKS (64) -/* The number of vCPUs in one sparse bank */ -#define HV_VCPUS_PER_SPARSE_BANK (64) - -/* HvCallSendSyntheticClusterIpi hypercall */ -struct hv_send_ipi { - u32 vector; - u32 reserved; - u64 cpu_mask; -} __packed; - -/* HvCallSendSyntheticClusterIpiEx hypercall */ -struct hv_send_ipi_ex { - u32 vector; - u32 reserved; - struct hv_vpset vp_set; -} __packed; - -/* HvFlushGuestPhysicalAddressSpace hypercalls */ -struct hv_guest_mapping_flush { - u64 address_space; - u64 flags; -} __packed; - -/* - * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited - * by the bitwidth of "additional_pages" in union hv_gpa_page_range. - */ -#define HV_MAX_FLUSH_PAGES (2048) -#define HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB 0 -#define HV_GPA_PAGE_RANGE_PAGE_SIZE_1GB 1 - -/* HvFlushGuestPhysicalAddressList, HvExtCallMemoryHeatHint hypercall */ -union hv_gpa_page_range { - u64 address_space; - struct { - u64 additional_pages:11; - u64 largepage:1; - u64 basepfn:52; - } page; - struct { - u64 reserved:12; - u64 page_size:1; - u64 reserved1:8; - u64 base_large_pfn:43; - }; -}; - -/* - * All input flush parameters should be in single page. The max flush - * count is equal with how many entries of union hv_gpa_page_range can - * be populated into the input parameter page. 
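As a worked example of that limit: with the fixed 4096-byte hypervisor page defined above and the 8-byte union hv_gpa_page_range, HV_MAX_FLUSH_REP_COUNT below evaluates to (4096 - 2 * 8) / 8 = 510 page ranges per hypercall; the two u64s being subtracted are the address_space and flags fields of struct hv_guest_mapping_flush_list.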
- */ -#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \ - sizeof(union hv_gpa_page_range)) - -struct hv_guest_mapping_flush_list { - u64 address_space; - u64 flags; - union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT]; -}; - -/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */ -struct hv_tlb_flush { - u64 address_space; - u64 flags; - u64 processor_mask; - u64 gva_list[]; -} __packed; - -/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */ -struct hv_tlb_flush_ex { - u64 address_space; - u64 flags; - struct hv_vpset hv_vp_set; - u64 gva_list[]; -} __packed; - -/* HvGetPartitionId hypercall (output only) */ -struct hv_get_partition_id { - u64 partition_id; -} __packed; - -/* HvDepositMemory hypercall */ -struct hv_deposit_memory { - u64 partition_id; - u64 gpa_page_list[]; -} __packed; - -struct hv_proximity_domain_flags { - u32 proximity_preferred : 1; - u32 reserved : 30; - u32 proximity_info_valid : 1; -} __packed; - -struct hv_proximity_domain_info { - u32 domain_id; - struct hv_proximity_domain_flags flags; -} __packed; - -struct hv_lp_startup_status { - u64 hv_status; - u64 substatus1; - u64 substatus2; - u64 substatus3; - u64 substatus4; - u64 substatus5; - u64 substatus6; -} __packed; - -/* HvAddLogicalProcessor hypercall */ -struct hv_input_add_logical_processor { - u32 lp_index; - u32 apic_id; - struct hv_proximity_domain_info proximity_domain_info; -} __packed; - -struct hv_output_add_logical_processor { - struct hv_lp_startup_status startup_status; -} __packed; - -enum HV_SUBNODE_TYPE -{ - HvSubnodeAny = 0, - HvSubnodeSocket = 1, - HvSubnodeAmdNode = 2, - HvSubnodeL3 = 3, - HvSubnodeCount = 4, - HvSubnodeInvalid = -1 -}; - -/* HvCreateVp hypercall */ -struct hv_create_vp { - u64 partition_id; - u32 vp_index; - u8 padding[3]; - u8 subnode_type; - u64 subnode_id; - struct hv_proximity_domain_info proximity_domain_info; - u64 flags; -} __packed; - -enum hv_interrupt_source { - HV_INTERRUPT_SOURCE_MSI = 1, /* MSI and MSI-X */ - HV_INTERRUPT_SOURCE_IOAPIC, -}; - -union hv_ioapic_rte { - u64 as_uint64; - - struct { - u32 vector:8; - u32 delivery_mode:3; - u32 destination_mode:1; - u32 delivery_status:1; - u32 interrupt_polarity:1; - u32 remote_irr:1; - u32 trigger_mode:1; - u32 interrupt_mask:1; - u32 reserved1:15; - - u32 reserved2:24; - u32 destination_id:8; - }; - - struct { - u32 low_uint32; - u32 high_uint32; - }; -} __packed; - -struct hv_interrupt_entry { - u32 source; - u32 reserved1; - union { - union hv_msi_entry msi_entry; - union hv_ioapic_rte ioapic_rte; - }; -} __packed; - -/* - * flags for hv_device_interrupt_target.flags - */ -#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1 -#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2 - -struct hv_device_interrupt_target { - u32 vector; - u32 flags; - union { - u64 vp_mask; - struct hv_vpset vp_set; - }; -} __packed; - -struct hv_retarget_device_interrupt { - u64 partition_id; /* use "self" */ - u64 device_id; - struct hv_interrupt_entry int_entry; - u64 reserved2; - struct hv_device_interrupt_target int_target; -} __packed __aligned(8); - -/* - * These Hyper-V registers provide information equivalent to the CPUID - * instruction on x86/x64. 
- */ -#define HV_REGISTER_HYPERVISOR_VERSION 0x00000100 /*CPUID 0x40000002 */ -#define HV_REGISTER_FEATURES 0x00000200 /*CPUID 0x40000003 */ -#define HV_REGISTER_ENLIGHTENMENTS 0x00000201 /*CPUID 0x40000004 */ - -/* - * Synthetic register definitions equivalent to MSRs on x86/x64 - */ -#define HV_REGISTER_GUEST_CRASH_P0 0x00000210 -#define HV_REGISTER_GUEST_CRASH_P1 0x00000211 -#define HV_REGISTER_GUEST_CRASH_P2 0x00000212 -#define HV_REGISTER_GUEST_CRASH_P3 0x00000213 -#define HV_REGISTER_GUEST_CRASH_P4 0x00000214 -#define HV_REGISTER_GUEST_CRASH_CTL 0x00000215 - -#define HV_REGISTER_GUEST_OS_ID 0x00090002 -#define HV_REGISTER_VP_INDEX 0x00090003 -#define HV_REGISTER_TIME_REF_COUNT 0x00090004 -#define HV_REGISTER_REFERENCE_TSC 0x00090017 - -#define HV_REGISTER_SINT0 0x000A0000 -#define HV_REGISTER_SCONTROL 0x000A0010 -#define HV_REGISTER_SIEFP 0x000A0012 -#define HV_REGISTER_SIMP 0x000A0013 -#define HV_REGISTER_EOM 0x000A0014 - -#define HV_REGISTER_STIMER0_CONFIG 0x000B0000 -#define HV_REGISTER_STIMER0_COUNT 0x000B0001 - -/* HvGetVpRegisters hypercall input with variable size reg name list*/ -struct hv_get_vp_registers_input { - struct { - u64 partitionid; - u32 vpindex; - u8 inputvtl; - u8 padding[3]; - } header; - struct input { - u32 name0; - u32 name1; - } element[]; -} __packed; - -/* HvGetVpRegisters returns an array of these output elements */ -struct hv_get_vp_registers_output { - union { - struct { - u32 a; - u32 b; - u32 c; - u32 d; - } as32 __packed; - struct { - u64 low; - u64 high; - } as64 __packed; - }; -}; - -/* HvSetVpRegisters hypercall with variable size reg name/value list*/ -struct hv_set_vp_registers_input { - struct { - u64 partitionid; - u32 vpindex; - u8 inputvtl; - u8 padding[3]; - } header; - struct { - u32 name; - u32 padding1; - u64 padding2; - u64 valuelow; - u64 valuehigh; - } element[]; -} __packed; - -enum hv_device_type { - HV_DEVICE_TYPE_LOGICAL = 0, - HV_DEVICE_TYPE_PCI = 1, - HV_DEVICE_TYPE_IOAPIC = 2, - HV_DEVICE_TYPE_ACPI = 3, -}; - -typedef u16 hv_pci_rid; -typedef u16 hv_pci_segment; -typedef u64 hv_logical_device_id; -union hv_pci_bdf { - u16 as_uint16; - - struct { - u8 function:3; - u8 device:5; - u8 bus; - }; -} __packed; - -union hv_pci_bus_range { - u16 as_uint16; - - struct { - u8 subordinate_bus; - u8 secondary_bus; - }; -} __packed; - -union hv_device_id { - u64 as_uint64; - - struct { - u64 reserved0:62; - u64 device_type:2; - }; - - /* HV_DEVICE_TYPE_LOGICAL */ - struct { - u64 id:62; - u64 device_type:2; - } logical; - - /* HV_DEVICE_TYPE_PCI */ - struct { - union { - hv_pci_rid rid; - union hv_pci_bdf bdf; - }; - - hv_pci_segment segment; - union hv_pci_bus_range shadow_bus_range; - - u16 phantom_function_bits:2; - u16 source_shadow:1; - - u16 rsvdz0:11; - u16 device_type:2; - } pci; - - /* HV_DEVICE_TYPE_IOAPIC */ - struct { - u8 ioapic_id; - u8 rsvdz0; - u16 rsvdz1; - u16 rsvdz2; - - u16 rsvdz3:14; - u16 device_type:2; - } ioapic; - - /* HV_DEVICE_TYPE_ACPI */ - struct { - u32 input_mapping_base; - u32 input_mapping_count:30; - u32 device_type:2; - } acpi; -} __packed; - -enum hv_interrupt_trigger_mode { - HV_INTERRUPT_TRIGGER_MODE_EDGE = 0, - HV_INTERRUPT_TRIGGER_MODE_LEVEL = 1, -}; - -struct hv_device_interrupt_descriptor { - u32 interrupt_type; - u32 trigger_mode; - u32 vector_count; - u32 reserved; - struct hv_device_interrupt_target target; -} __packed; - -struct hv_input_map_device_interrupt { - u64 partition_id; - u64 device_id; - u64 flags; - struct hv_interrupt_entry logical_interrupt_entry; - struct 
hv_device_interrupt_descriptor interrupt_descriptor; -} __packed; - -struct hv_output_map_device_interrupt { - struct hv_interrupt_entry interrupt_entry; -} __packed; - -struct hv_input_unmap_device_interrupt { - u64 partition_id; - u64 device_id; - struct hv_interrupt_entry interrupt_entry; -} __packed; - -#define HV_SOURCE_SHADOW_NONE 0x0 -#define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1 - -/* - * Version info reported by hypervisor - */ -union hv_hypervisor_version_info { - struct { - u32 build_number; - - u32 minor_version : 16; - u32 major_version : 16; - - u32 service_pack; - - u32 service_number : 24; - u32 service_branch : 8; - }; - struct { - u32 eax; - u32 ebx; - u32 ecx; - u32 edx; - }; -}; - -/* - * The whole argument should fit in a page to be able to pass to the hypervisor - * in one hypercall. - */ -#define HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES \ - ((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \ - sizeof(union hv_gpa_page_range)) - -/* HvExtCallMemoryHeatHint hypercall */ -#define HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD 2 -struct hv_memory_hint { - u64 type:2; - u64 reserved:62; - union hv_gpa_page_range ranges[]; -} __packed; - -/* Data structures for HVCALL_MMIO_READ and HVCALL_MMIO_WRITE */ -#define HV_HYPERCALL_MMIO_MAX_DATA_LENGTH 64 - -struct hv_mmio_read_input { - u64 gpa; - u32 size; - u32 reserved; -} __packed; - -struct hv_mmio_read_output { - u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH]; -} __packed; - -struct hv_mmio_write_input { - u64 gpa; - u32 size; - u32 reserved; - u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH]; -} __packed; - -#endif diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index 8fe7aaab2599..a7bbe504e4f3 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -6,9 +6,8 @@ * independent. See arch/<arch>/include/asm/mshyperv.h for definitions * that are specific to architecture <arch>. * - * Definitions that are specified in the Hyper-V Top Level Functional - * Spec (TLFS) should not go in this file, but should instead go in - * hyperv-tlfs.h. + * Definitions that are derived from Hyper-V code or headers should not go in + * this file, but should instead go in the relevant files in include/hyperv. * * Copyright (C) 2019, Microsoft, Inc. 
* @@ -25,7 +24,7 @@ #include <linux/cpumask.h> #include <linux/nmi.h> #include <asm/ptrace.h> -#include <asm/hyperv-tlfs.h> +#include <hyperv/hvhdk.h> #define VTPM_BASE_ADDRESS 0xfed40000 diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index 7c48f5fbf8aa..892ece4558a2 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -109,8 +109,7 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page) { struct ptdesc *ptdesc = page_ptdesc(pte_page); - pagetable_pte_dtor(ptdesc); - pagetable_free(ptdesc); + pagetable_dtor_free(ptdesc); } @@ -153,8 +152,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) struct ptdesc *ptdesc = virt_to_ptdesc(pmd); BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); - pagetable_pmd_dtor(ptdesc); - pagetable_free(ptdesc); + pagetable_dtor_free(ptdesc); } #endif @@ -202,8 +200,7 @@ static inline void __pud_free(struct mm_struct *mm, pud_t *pud) struct ptdesc *ptdesc = virt_to_ptdesc(pud); BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); - pagetable_pud_dtor(ptdesc); - pagetable_free(ptdesc); + pagetable_dtor_free(ptdesc); } #ifndef __HAVE_ARCH_PUD_FREE @@ -215,10 +212,82 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud) #endif /* CONFIG_PGTABLE_LEVELS > 3 */ +#if CONFIG_PGTABLE_LEVELS > 4 + +static inline p4d_t *__p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr) +{ + gfp_t gfp = GFP_PGTABLE_USER; + struct ptdesc *ptdesc; + + if (mm == &init_mm) + gfp = GFP_PGTABLE_KERNEL; + gfp &= ~__GFP_HIGHMEM; + + ptdesc = pagetable_alloc_noprof(gfp, 0); + if (!ptdesc) + return NULL; + + pagetable_p4d_ctor(ptdesc); + return ptdesc_address(ptdesc); +} +#define __p4d_alloc_one(...) alloc_hooks(__p4d_alloc_one_noprof(__VA_ARGS__)) + +#ifndef __HAVE_ARCH_P4D_ALLOC_ONE +static inline p4d_t *p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr) +{ + return __p4d_alloc_one_noprof(mm, addr); +} +#define p4d_alloc_one(...) alloc_hooks(p4d_alloc_one_noprof(__VA_ARGS__)) +#endif + +static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d) +{ + struct ptdesc *ptdesc = virt_to_ptdesc(p4d); + + BUG_ON((unsigned long)p4d & (PAGE_SIZE-1)); + pagetable_dtor_free(ptdesc); +} + +#ifndef __HAVE_ARCH_P4D_FREE +static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) +{ + if (!mm_p4d_folded(mm)) + __p4d_free(mm, p4d); +} +#endif + +#endif /* CONFIG_PGTABLE_LEVELS > 4 */ + +static inline pgd_t *__pgd_alloc_noprof(struct mm_struct *mm, unsigned int order) +{ + gfp_t gfp = GFP_PGTABLE_USER; + struct ptdesc *ptdesc; + + if (mm == &init_mm) + gfp = GFP_PGTABLE_KERNEL; + gfp &= ~__GFP_HIGHMEM; + + ptdesc = pagetable_alloc_noprof(gfp, order); + if (!ptdesc) + return NULL; + + pagetable_pgd_ctor(ptdesc); + return ptdesc_address(ptdesc); +} +#define __pgd_alloc(...) alloc_hooks(__pgd_alloc_noprof(__VA_ARGS__)) + +static inline void __pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + struct ptdesc *ptdesc = virt_to_ptdesc(pgd); + + BUG_ON((unsigned long)pgd & (PAGE_SIZE-1)); + pagetable_dtor_free(ptdesc); +} + #ifndef __HAVE_ARCH_PGD_FREE static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - pagetable_free(virt_to_ptdesc(pgd)); + __pgd_free(mm, pgd); } #endif diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index 5a80fe728dc8..182b039ce5fa 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h @@ -5,7 +5,7 @@ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. 
* * This file is a stub providing documentation for what functions - * asm-ARCH/syscall.h files need to define. Most arch definitions + * arch/ARCH/include/asm/syscall.h files need to define. Most arch definitions * will be simple inlines. * * All of these functions expect to be called with no locks, diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 709830274b75..e402aef79c93 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -153,8 +153,9 @@ * * Useful if your architecture has non-page page directories. * - * When used, an architecture is expected to provide __tlb_remove_table() - * which does the actual freeing of these pages. + * When used, an architecture is expected to provide __tlb_remove_table() or + * use the generic __tlb_remove_table(), which does the actual freeing of these + * pages. * * MMU_GATHER_RCU_TABLE_FREE * @@ -207,16 +208,31 @@ struct mmu_table_batch { #define MAX_TABLE_BATCH \ ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) +#ifndef __HAVE_ARCH_TLB_REMOVE_TABLE +static inline void __tlb_remove_table(void *table) +{ + struct ptdesc *ptdesc = (struct ptdesc *)table; + + pagetable_dtor_free(ptdesc); +} +#endif + extern void tlb_remove_table(struct mmu_gather *tlb, void *table); -#else /* !CONFIG_MMU_GATHER_HAVE_TABLE_FREE */ +#else /* !CONFIG_MMU_GATHER_TABLE_FREE */ +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page); /* * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based * page directories and we can use the normal page batching to free them. */ -#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page)) +static inline void tlb_remove_table(struct mmu_gather *tlb, void *table) +{ + struct page *page = (struct page *)table; + pagetable_dtor(page_ptdesc(page)); + tlb_remove_page(tlb, page); +} #endif /* CONFIG_MMU_GATHER_TABLE_FREE */ #ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h index aa5233b1eba9..d48dd4176fd3 100644 --- a/include/clocksource/hyperv_timer.h +++ b/include/clocksource/hyperv_timer.h @@ -15,7 +15,7 @@ #include <linux/clocksource.h> #include <linux/math64.h> -#include <asm/hyperv-tlfs.h> +#include <hyperv/hvhdk.h> #define HV_MAX_MAX_DELTA_TICKS 0xffffffff #define HV_MIN_DELTA_TICKS 1 diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h index 81330c6446f6..b0853f7cada0 100644 --- a/include/crypto/gf128mul.h +++ b/include/crypto/gf128mul.h @@ -158,12 +158,10 @@ 64...71 72...79 80...87 88...95 96..103 104.111 112.119 120.127 */ -/* A slow generic version of gf_mul, implemented for lle and bbe +/* A slow generic version of gf_mul, implemented for lle * It multiplies a and b and puts the result in a */ void gf128mul_lle(be128 *a, const be128 *b); -void gf128mul_bbe(be128 *a, const be128 *b); - /* * The following functions multiply a field element by x in * the polynomial field representation. 
They use 64-bit word operations @@ -224,9 +222,7 @@ struct gf128mul_4k { }; struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g); -struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g); void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t); -void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t); void gf128mul_x8_ble(le128 *r, const le128 *x); static inline void gf128mul_free_4k(struct gf128mul_4k *t) { diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 58967593b6b4..84da3424decc 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -12,20 +12,6 @@ #include <crypto/hash.h> struct ahash_request; -struct scatterlist; - -struct crypto_hash_walk { - char *data; - - unsigned int offset; - unsigned int flags; - - struct page *pg; - unsigned int entrylen; - - unsigned int total; - struct scatterlist *sg; -}; struct ahash_instance { void (*free)(struct ahash_instance *inst); @@ -57,15 +43,6 @@ struct crypto_shash_spawn { struct crypto_spawn base; }; -int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); -int crypto_hash_walk_first(struct ahash_request *req, - struct crypto_hash_walk *walk); - -static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk) -{ - return !(walk->entrylen | walk->total); -} - int crypto_register_ahash(struct ahash_alg *alg); void crypto_unregister_ahash(struct ahash_alg *alg); int crypto_register_ahashes(struct ahash_alg *algs, int count); diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 7ae42afdcf3e..4f49621d3eb6 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -11,7 +11,6 @@ #include <crypto/algapi.h> #include <crypto/internal/cipher.h> #include <crypto/skcipher.h> -#include <linux/list.h> #include <linux/types.h> /* @@ -58,12 +57,6 @@ struct crypto_lskcipher_spawn { struct skcipher_walk { union { struct { - struct page *page; - unsigned long offset; - } phys; - - struct { - u8 *page; void *addr; } virt; } src, dst; @@ -74,8 +67,6 @@ struct skcipher_walk { struct scatter_walk out; unsigned int total; - struct list_head buffers; - u8 *page; u8 *buffer; u8 *oiv; @@ -205,17 +196,14 @@ void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count); int lskcipher_register_instance(struct crypto_template *tmpl, struct lskcipher_instance *inst); -int skcipher_walk_done(struct skcipher_walk *walk, int err); +int skcipher_walk_done(struct skcipher_walk *walk, int res); int skcipher_walk_virt(struct skcipher_walk *walk, struct skcipher_request *req, bool atomic); -int skcipher_walk_async(struct skcipher_walk *walk, - struct skcipher_request *req); int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, struct aead_request *req, bool atomic); int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, struct aead_request *req, bool atomic); -void skcipher_walk_complete(struct skcipher_walk *walk, int err); static inline void skcipher_walk_abort(struct skcipher_walk *walk) { diff --git a/include/drm/bridge/dw_mipi_dsi2.h b/include/drm/bridge/dw_mipi_dsi2.h new file mode 100644 index 000000000000..c18c49379247 --- /dev/null +++ b/include/drm/bridge/dw_mipi_dsi2.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024, Fuzhou Rockchip Electronics Co., Ltd + * + * Authors: Guochun Huang <hero.huang@rock-chips.com> + * Heiko Stuebner <heiko.stuebner@cherry.de> + */ + +#ifndef __DW_MIPI_DSI2__ +#define __DW_MIPI_DSI2__ + +#include <linux/regmap.h> 
+#include <linux/types.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_bridge.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> +#include <drm/drm_modes.h> + +struct drm_display_mode; +struct drm_encoder; +struct dw_mipi_dsi2; +struct mipi_dsi_device; +struct platform_device; + +enum dw_mipi_dsi2_phy_type { + DW_MIPI_DSI2_DPHY, + DW_MIPI_DSI2_CPHY, +}; + +struct dw_mipi_dsi2_phy_iface { + int ppi_width; + enum dw_mipi_dsi2_phy_type phy_type; +}; + +struct dw_mipi_dsi2_phy_timing { + u32 data_hs2lp; + u32 data_lp2hs; +}; + +struct dw_mipi_dsi2_phy_ops { + int (*init)(void *priv_data); + void (*power_on)(void *priv_data); + void (*power_off)(void *priv_data); + void (*get_interface)(void *priv_data, struct dw_mipi_dsi2_phy_iface *iface); + int (*get_lane_mbps)(void *priv_data, + const struct drm_display_mode *mode, + unsigned long mode_flags, u32 lanes, u32 format, + unsigned int *lane_mbps); + int (*get_timing)(void *priv_data, unsigned int lane_mbps, + struct dw_mipi_dsi2_phy_timing *timing); + int (*get_esc_clk_rate)(void *priv_data, unsigned int *esc_clk_rate); +}; + +struct dw_mipi_dsi2_host_ops { + int (*attach)(void *priv_data, + struct mipi_dsi_device *dsi); + int (*detach)(void *priv_data, + struct mipi_dsi_device *dsi); +}; + +struct dw_mipi_dsi2_plat_data { + struct regmap *regmap; + unsigned int max_data_lanes; + + enum drm_mode_status (*mode_valid)(void *priv_data, + const struct drm_display_mode *mode, + unsigned long mode_flags, + u32 lanes, u32 format); + + bool (*mode_fixup)(void *priv_data, const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + u32 *(*get_input_bus_fmts)(void *priv_data, + struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts); + + const struct dw_mipi_dsi2_phy_ops *phy_ops; + const struct dw_mipi_dsi2_host_ops *host_ops; + + void *priv_data; +}; + +struct dw_mipi_dsi2 *dw_mipi_dsi2_probe(struct platform_device *pdev, + const struct dw_mipi_dsi2_plat_data *plat_data); +void dw_mipi_dsi2_remove(struct dw_mipi_dsi2 *dsi2); +int dw_mipi_dsi2_bind(struct dw_mipi_dsi2 *dsi2, struct drm_encoder *encoder); +void dw_mipi_dsi2_unbind(struct dw_mipi_dsi2 *dsi2); + +#endif /* __DW_MIPI_DSI2__ */ diff --git a/include/drm/drm_client_setup.h b/include/drm/clients/drm_client_setup.h index 46aab3fb46be..46aab3fb46be 100644 --- a/include/drm/drm_client_setup.h +++ b/include/drm/clients/drm_client_setup.h diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h index 279624833ea9..8f4054a56039 100644 --- a/include/drm/display/drm_dp_helper.h +++ b/include/drm/display/drm_dp_helper.h @@ -567,6 +567,11 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy, u8 link_status[DP_LINK_STATUS_SIZE]); +int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux, + int vcpid, u8 start_time_slot, u8 time_slot_count); +int drm_dp_dpcd_clear_payload(struct drm_dp_aux *aux); +int drm_dp_dpcd_poll_act_handled(struct drm_dp_aux *aux, int timeout_ms); + bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, u8 real_edid_checksum); diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index a80ba457a858..e39de161c938 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -867,8 +867,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct 
drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); -fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, - int link_rate, int link_lane_count); +fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count); int drm_dp_calc_pbn_mode(int clock, int bpp); diff --git a/include/drm/display/drm_hdmi_audio_helper.h b/include/drm/display/drm_hdmi_audio_helper.h new file mode 100644 index 000000000000..c9a6faef4109 --- /dev/null +++ b/include/drm/display/drm_hdmi_audio_helper.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_DISPLAY_HDMI_AUDIO_HELPER_H_ +#define DRM_DISPLAY_HDMI_AUDIO_HELPER_H_ + +#include <linux/types.h> + +struct drm_connector; +struct drm_connector_hdmi_audio_funcs; + +struct device; + +int drm_connector_hdmi_audio_init(struct drm_connector *connector, + struct device *hdmi_codec_dev, + const struct drm_connector_hdmi_audio_funcs *funcs, + unsigned int max_i2s_playback_channels, + bool spdif_playback, + int sound_dai_port); +void drm_connector_hdmi_audio_plugged_notify(struct drm_connector *connector, + bool plugged); + +#endif diff --git a/include/drm/display/drm_hdmi_state_helper.h b/include/drm/display/drm_hdmi_state_helper.h index 2d45fcfa4619..44ec5c4a7503 100644 --- a/include/drm/display/drm_hdmi_state_helper.h +++ b/include/drm/display/drm_hdmi_state_helper.h @@ -6,8 +6,11 @@ struct drm_atomic_state; struct drm_connector; struct drm_connector_state; +struct drm_display_mode; struct hdmi_audio_infoframe; +enum drm_connector_status; + void __drm_atomic_helper_connector_hdmi_reset(struct drm_connector *connector, struct drm_connector_state *new_conn_state); @@ -19,5 +22,12 @@ int drm_atomic_helper_connector_hdmi_update_audio_infoframe(struct drm_connector int drm_atomic_helper_connector_hdmi_clear_audio_infoframe(struct drm_connector *connector); int drm_atomic_helper_connector_hdmi_update_infoframes(struct drm_connector *connector, struct drm_atomic_state *state); +void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector, + enum drm_connector_status status); +void drm_atomic_helper_connector_hdmi_force(struct drm_connector *connector); + +enum drm_mode_status +drm_hdmi_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode); #endif // DRM_HDMI_STATE_HELPER_H_ diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index e8d735b7f6a4..496dbbd2ad7e 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -41,6 +41,8 @@ struct drm_display_info; struct drm_minor; struct drm_panel; struct edid; +struct hdmi_codec_daifmt; +struct hdmi_codec_params; struct i2c_adapter; /** @@ -677,6 +679,57 @@ struct drm_bridge_funcs { const u8 *buffer, size_t len); /** + * @hdmi_audio_startup: + * + * Called when ASoC starts an audio stream setup. The + * @hdmi_audio_startup() is optional. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*hdmi_audio_startup)(struct drm_connector *connector, + struct drm_bridge *bridge); + + /** + * @hdmi_audio_prepare: + * Configures HDMI-encoder for audio stream. Can be called multiple + * times for each setup. Mandatory if HDMI audio is enabled in the + * bridge's configuration. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*hdmi_audio_prepare)(struct drm_connector *connector, + struct drm_bridge *bridge, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms); + + /** + * @hdmi_audio_shutdown: + * + * Shut down the audio stream. 
Mandatory if HDMI audio is enabled in + * the bridge's configuration. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + void (*hdmi_audio_shutdown)(struct drm_connector *connector, + struct drm_bridge *bridge); + + /** + * @hdmi_audio_mute_stream: + * + * Mute/unmute HDMI audio stream. The @hdmi_audio_mute_stream callback + * is optional. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*hdmi_audio_mute_stream)(struct drm_connector *connector, + struct drm_bridge *bridge, + bool enable, int direction); + + /** * @debugfs_init: * * Allows bridges to create bridge-specific debugfs files. @@ -859,6 +912,27 @@ struct drm_bridge { * @DRM_BRIDGE_OP_HDMI is set. */ unsigned int max_bpc; + + /** + * @hdmi_audio_dev: device to be used as a parent for the HDMI Codec + */ + struct device *hdmi_audio_dev; + + /** + * @hdmi_audio_max_i2s_playback_channels: maximum number of playback + * I2S channels for the HDMI codec + */ + int hdmi_audio_max_i2s_playback_channels; + + /** + * @hdmi_audio_spdif_playback: set if HDMI codec has S/PDIF playback port + */ + unsigned int hdmi_audio_spdif_playback : 1; + + /** + * @hdmi_audio_dai_port: sound DAI port, -1 if it is not enabled + */ + int hdmi_audio_dai_port; }; static inline struct drm_bridge * diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index e3fa43291f44..f13d597370a3 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -45,7 +45,10 @@ struct drm_property; struct drm_property_blob; struct drm_printer; struct drm_privacy_screen; +struct drm_edid; struct edid; +struct hdmi_codec_daifmt; +struct hdmi_codec_params; struct i2c_adapter; enum drm_connector_force { @@ -1141,6 +1144,53 @@ struct drm_connector_state { struct drm_connector_hdmi_state hdmi; }; +struct drm_connector_hdmi_audio_funcs { + /** + * @startup: + * + * Called when ASoC starts an audio stream setup. The + * @startup() is optional. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*startup)(struct drm_connector *connector); + + /** + * @prepare: + * Configures HDMI-encoder for audio stream. Can be called + * multiple times for each setup. Mandatory. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*prepare)(struct drm_connector *connector, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms); + + /** + * @shutdown: + * + * Shut down the audio stream. Mandatory. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + void (*shutdown)(struct drm_connector *connector); + + /** + * @mute_stream: + * + * Mute/unmute HDMI audio stream. The @mute_stream callback is + * optional. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*mute_stream)(struct drm_connector *connector, + bool enable, int direction); +}; + /** * struct drm_connector_hdmi_funcs - drm_hdmi_connector control functions */ @@ -1198,6 +1248,21 @@ struct drm_connector_hdmi_funcs { int (*write_infoframe)(struct drm_connector *connector, enum hdmi_infoframe_type type, const u8 *buffer, size_t len); + + /** + * @read_edid: + * + * This callback is used by the framework as a replacement for reading + * the EDID from connector->ddc. It is still recommended to provide + * connector->ddc instead of implementing this callback. Returned EDID + * should be freed via the drm_edid_free(). + * + * The @read_edid callback is optional. + * + * Returns: + * Valid EDID on success, NULL in case of failure. 
+ */ + const struct drm_edid *(*read_edid)(struct drm_connector *connector); }; /** @@ -1660,6 +1725,68 @@ struct drm_cmdline_mode { bool tv_mode_specified; }; +/** + * struct drm_connector_hdmi_audio - DRM gemeric HDMI Codec-related structure + * + * HDMI drivers usually incorporate a HDMI Codec. This structure expresses the + * generic HDMI Codec as used by the DRM HDMI Codec framework. + */ +struct drm_connector_hdmi_audio { + /** + * @funcs: + * + * Implementation of the HDMI codec functionality to be used by the DRM + * HDMI Codec framework. + */ + const struct drm_connector_hdmi_audio_funcs *funcs; + + /** + * @codec_pdev: + * + * Platform device created to hold the HDMI Codec. It will be + * automatically unregistered during drm_connector_cleanup(). + */ + struct platform_device *codec_pdev; + + /** + * @lock: + * + * Mutex to protect @last_state, @plugged_cb and @plugged_cb_dev. + */ + struct mutex lock; + + /** + * @plugged_cb: + * + * Callback to be called when the HDMI sink get plugged to or unplugged + * from this connector. This is assigned by the framework when + * requested by the ASoC code. + */ + void (*plugged_cb)(struct device *dev, bool plugged); + + /** + * @plugged_cb_dev: + * + * The data for @plugged_cb(). It is being provided by the ASoC. + */ + struct device *plugged_cb_dev; + + /** + * @last_state: + * + * Last plugged state recored by the framework. It is used to correctly + * report the state to @plugged_cb(). + */ + bool last_state; + + /** + * @dai_port: + * + * The port in DT that is used for the Codec DAI. + */ + int dai_port; +}; + /* * struct drm_connector_hdmi - DRM Connector HDMI-related structure */ @@ -2001,8 +2128,11 @@ struct drm_connector { struct drm_encoder *encoder; #define MAX_ELD_BYTES 128 - /** @eld: EDID-like data, if present */ + /** @eld: EDID-like data, if present, protected by @eld_mutex */ uint8_t eld[MAX_ELD_BYTES]; + /** @eld_mutex: protection for concurrenct access to @eld */ + struct mutex eld_mutex; + /** @latency_present: AV delay info from ELD, if found */ bool latency_present[2]; /** @@ -2118,6 +2248,11 @@ struct drm_connector { * @hdmi: HDMI-related variable and properties. */ struct drm_connector_hdmi hdmi; + + /** + * @hdmi_audio: HDMI codec properties and non-DRM state. 
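A minimal sketch of a driver wiring up these audio callbacks (illustrative only: the my_* names are hypothetical, the bodies are stubs, and the init call at the end uses the drm_connector_hdmi_audio_init() helper declared in drm_hdmi_audio_helper.h above):

#include <drm/drm_connector.h>
#include <drm/display/drm_hdmi_audio_helper.h>

static int my_audio_startup(struct drm_connector *connector)
{
	return 0;   /* optional: power up the encoder's audio path */
}

static int my_audio_prepare(struct drm_connector *connector,
			    struct hdmi_codec_daifmt *fmt,
			    struct hdmi_codec_params *hparms)
{
	return 0;   /* mandatory: program clocks and infoframes from fmt/hparms */
}

static void my_audio_shutdown(struct drm_connector *connector)
{
	/* mandatory: mute and power down the audio path */
}

static const struct drm_connector_hdmi_audio_funcs my_audio_funcs = {
	.startup  = my_audio_startup,
	.prepare  = my_audio_prepare,
	.shutdown = my_audio_shutdown,
	/* .mute_stream is optional and omitted here */
};

/* From connector init, e.g. up to 8 I2S channels, no S/PDIF, DAI port 1: */
/* drm_connector_hdmi_audio_init(connector, dev, &my_audio_funcs, 8, false, 1); */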
+ */ + struct drm_connector_hdmi_audio hdmi_audio; }; #define obj_to_connector(x) container_of(x, struct drm_connector, base) @@ -2126,6 +2261,11 @@ int drm_connector_init(struct drm_device *dev, struct drm_connector *connector, const struct drm_connector_funcs *funcs, int connector_type); +int drm_connector_dynamic_init(struct drm_device *dev, + struct drm_connector *connector, + const struct drm_connector_funcs *funcs, + int connector_type, + struct i2c_adapter *ddc); int drm_connector_init_with_ddc(struct drm_device *dev, struct drm_connector *connector, const struct drm_connector_funcs *funcs, @@ -2147,6 +2287,7 @@ int drmm_connector_hdmi_init(struct drm_device *dev, unsigned int max_bpc); void drm_connector_attach_edid_property(struct drm_connector *connector); int drm_connector_register(struct drm_connector *connector); +int drm_connector_dynamic_register(struct drm_connector *connector); void drm_connector_unregister(struct drm_connector *connector); int drm_connector_attach_encoder(struct drm_connector *connector, struct drm_encoder *encoder); diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 1bbbcb8e2d23..9952b846c170 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -34,6 +34,7 @@ #include <drm/drm_device.h> +struct dmem_cgroup_region; struct drm_fb_helper; struct drm_fb_helper_surface_size; struct drm_file; @@ -401,8 +402,6 @@ struct drm_driver { char *name; /** @desc: driver description */ char *desc; - /** @date: driver date, unused, to be removed */ - char *date; /** * @driver_features: @@ -438,6 +437,10 @@ void *__devm_drm_dev_alloc(struct device *parent, const struct drm_driver *driver, size_t size, size_t offset); +struct dmem_cgroup_region * +drmm_cgroup_register_region(struct drm_device *dev, + const char *region_name, u64 size); + /** * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance * @parent: Parent device object diff --git a/include/drm/drm_fbdev_client.h b/include/drm/drm_fbdev_client.h deleted file mode 100644 index e11a5614f127..000000000000 --- a/include/drm/drm_fbdev_client.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: MIT */ - -#ifndef DRM_FBDEV_CLIENT_H -#define DRM_FBDEV_CLIENT_H - -struct drm_device; -struct drm_format_info; - -#ifdef CONFIG_DRM_FBDEV_EMULATION -int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format); -#else -static inline int drm_fbdev_client_setup(struct drm_device *dev, - const struct drm_format_info *format) -{ - return 0; -} -#endif - -#endif diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index f0ef32e9fa5e..ef817926cddd 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -494,6 +494,7 @@ struct drm_memory_stats { enum drm_gem_object_status; +int drm_memory_stats_is_zero(const struct drm_memory_stats *stats); void drm_print_memory_stats(struct drm_printer *p, const struct drm_memory_stats *stats, enum drm_gem_object_status supported_status, diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 5b8b1b059d32..fdae947682cd 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -48,19 +48,21 @@ struct drm_gem_object; * enum drm_gem_object_status - bitmask of object state for fdinfo reporting * @DRM_GEM_OBJECT_RESIDENT: object is resident in memory (ie. 
not unpinned) * @DRM_GEM_OBJECT_PURGEABLE: object marked as purgeable by userspace + * @DRM_GEM_OBJECT_ACTIVE: object is currently used by an active submission * * Bitmask of status used for fdinfo memory stats, see &drm_gem_object_funcs.status - * and drm_show_fdinfo(). Note that an object can DRM_GEM_OBJECT_PURGEABLE if - * it still active or not resident, in which case drm_show_fdinfo() will not + * and drm_show_fdinfo(). Note that an object can report DRM_GEM_OBJECT_PURGEABLE + * and be active or not resident, in which case drm_show_fdinfo() will not * account for it as purgeable. So drivers do not need to check if the buffer - * is idle and resident to return this bit. (Ie. userspace can mark a buffer - * as purgeable even while it is still busy on the GPU.. it does not _actually_ - * become puregeable until it becomes idle. The status gem object func does - * not need to consider this.) + * is idle and resident to return this bit, i.e. userspace can mark a buffer as + * purgeable even while it is still busy on the GPU. It will not get reported in + * the puregeable stats until it becomes idle. The status gem object func does + * not need to consider this. */ enum drm_gem_object_status { DRM_GEM_OBJECT_RESIDENT = BIT(0), DRM_GEM_OBJECT_PURGEABLE = BIT(1), + DRM_GEM_OBJECT_ACTIVE = BIT(2), }; /** diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index b3906dc04388..f77fe1531cf8 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -199,6 +199,8 @@ void drm_puts(struct drm_printer *p, const char *str); void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset); void drm_print_bits(struct drm_printer *p, unsigned long value, const char * const bits[], unsigned int nbits); +void drm_print_hex_dump(struct drm_printer *p, const char *prefix, + const u8 *buf, size_t len); __printf(2, 0) /** diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h index 70775748d243..15fa9b6865f4 100644 --- a/include/drm/drm_utils.h +++ b/include/drm/drm_utils.h @@ -12,8 +12,12 @@ #include <linux/types.h> +struct drm_edid; + int drm_get_panel_orientation_quirk(int width, int height); +int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid); + signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec); #endif diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h index 32480b5563db..77c826589ec1 100644 --- a/include/drm/intel/pciids.h +++ b/include/drm/intel/pciids.h @@ -717,37 +717,66 @@ MACRO__(0xA7AB, ## __VA_ARGS__) /* DG2 */ -#define INTEL_DG2_G10_IDS(MACRO__, ...) \ - MACRO__(0x5690, ## __VA_ARGS__), \ - MACRO__(0x5691, ## __VA_ARGS__), \ - MACRO__(0x5692, ## __VA_ARGS__), \ +#define INTEL_DG2_G10_D_IDS(MACRO__, ...) \ MACRO__(0x56A0, ## __VA_ARGS__), \ MACRO__(0x56A1, ## __VA_ARGS__), \ - MACRO__(0x56A2, ## __VA_ARGS__), \ + MACRO__(0x56A2, ## __VA_ARGS__) + +#define INTEL_DG2_G10_E_IDS(MACRO__, ...) \ MACRO__(0x56BE, ## __VA_ARGS__), \ MACRO__(0x56BF, ## __VA_ARGS__) -#define INTEL_DG2_G11_IDS(MACRO__, ...) \ - MACRO__(0x5693, ## __VA_ARGS__), \ - MACRO__(0x5694, ## __VA_ARGS__), \ - MACRO__(0x5695, ## __VA_ARGS__), \ +#define INTEL_DG2_G10_M_IDS(MACRO__, ...) \ + MACRO__(0x5690, ## __VA_ARGS__), \ + MACRO__(0x5691, ## __VA_ARGS__), \ + MACRO__(0x5692, ## __VA_ARGS__) + +#define INTEL_DG2_G10_IDS(MACRO__, ...) \ + INTEL_DG2_G10_D_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G10_E_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G10_M_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_DG2_G11_D_IDS(MACRO__, ...) 
\ MACRO__(0x56A5, ## __VA_ARGS__), \ MACRO__(0x56A6, ## __VA_ARGS__), \ MACRO__(0x56B0, ## __VA_ARGS__), \ - MACRO__(0x56B1, ## __VA_ARGS__), \ + MACRO__(0x56B1, ## __VA_ARGS__) + +#define INTEL_DG2_G11_E_IDS(MACRO__, ...) \ MACRO__(0x56BA, ## __VA_ARGS__), \ MACRO__(0x56BB, ## __VA_ARGS__), \ MACRO__(0x56BC, ## __VA_ARGS__), \ MACRO__(0x56BD, ## __VA_ARGS__) -#define INTEL_DG2_G12_IDS(MACRO__, ...) \ - MACRO__(0x5696, ## __VA_ARGS__), \ - MACRO__(0x5697, ## __VA_ARGS__), \ +#define INTEL_DG2_G11_M_IDS(MACRO__, ...) \ + MACRO__(0x5693, ## __VA_ARGS__), \ + MACRO__(0x5694, ## __VA_ARGS__), \ + MACRO__(0x5695, ## __VA_ARGS__) + +#define INTEL_DG2_G11_IDS(MACRO__, ...) \ + INTEL_DG2_G11_D_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G11_E_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G11_M_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_DG2_G12_D_IDS(MACRO__, ...) \ MACRO__(0x56A3, ## __VA_ARGS__), \ MACRO__(0x56A4, ## __VA_ARGS__), \ MACRO__(0x56B2, ## __VA_ARGS__), \ MACRO__(0x56B3, ## __VA_ARGS__) +#define INTEL_DG2_G12_M_IDS(MACRO__, ...) \ + MACRO__(0x5696, ## __VA_ARGS__), \ + MACRO__(0x5697, ## __VA_ARGS__) + +#define INTEL_DG2_G12_IDS(MACRO__, ...) \ + INTEL_DG2_G12_D_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G12_M_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_DG2_D_IDS(MACRO__, ...) \ + INTEL_DG2_G10_D_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G11_D_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G12_D_IDS(MACRO__, ## __VA_ARGS__) + #define INTEL_DG2_IDS(MACRO__, ...) \ INTEL_DG2_G10_IDS(MACRO__, ## __VA_ARGS__), \ INTEL_DG2_G11_IDS(MACRO__, ## __VA_ARGS__), \ @@ -829,6 +858,7 @@ MACRO__(0xB092, ## __VA_ARGS__), \ MACRO__(0xB0A0, ## __VA_ARGS__), \ MACRO__(0xB0A1, ## __VA_ARGS__), \ - MACRO__(0xB0A2, ## __VA_ARGS__) + MACRO__(0xB0A2, ## __VA_ARGS__), \ + MACRO__(0xB0B0, ## __VA_ARGS__) #endif /* __PCIIDS_H__ */ diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h index 5804408815be..8ea11cd8df39 100644 --- a/include/drm/ttm/ttm_bo.h +++ b/include/drm/ttm/ttm_bo.h @@ -421,6 +421,8 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo); int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man, struct ttm_operation_ctx *ctx); +int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, + void *buf, int len, int write); vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, struct vm_fault *vmf); vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index be034be56ba1..ee688d0c029b 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -38,6 +38,7 @@ #define TTM_MAX_BO_PRIORITY 4U #define TTM_NUM_MEM_TYPES 8 +struct dmem_cgroup_device; struct ttm_device; struct ttm_resource_manager; struct ttm_resource; @@ -211,6 +212,11 @@ struct ttm_resource_manager { * bdev->lru_lock. */ uint64_t usage; + + /** + * @cg: &dmem_cgroup_region used for memory accounting, if not NULL. + */ + struct dmem_cgroup_region *cg; }; /** @@ -239,6 +245,7 @@ struct ttm_bus_placement { * @placement: Placement flags. * @bus: Placement on io bus accessible to the CPU * @bo: weak reference to the BO, protected by ttm_device::lru_lock + * @css: cgroup state this resource is charged to * * Structure indicating the placement and space resources used by a * buffer object. 
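The dmem cgroup plumbing here spans both TTM headers: a driver registers a region with drmm_cgroup_register_region() (declared in the drm_drv.h hunk earlier) and points a manager's new @cg member at it, and the hunk below then threads the charged pool state through each ttm_resource via the new @css member and the extra ttm_resource_alloc() parameter. A minimal driver-side sketch of the registration step, assuming an ERR_PTR-style return (not spelled out in the header); the function name, the "vram" label and vram_size are purely illustrative:

/*
 * Sketch only: enable dmem cgroup accounting for one TTM manager.
 * drmm_cgroup_register_region() and ttm_resource_manager::cg are the
 * interfaces added by this series; everything named example_* or vram
 * is hypothetical.  Needs <drm/drm_drv.h>, <drm/ttm/ttm_resource.h>
 * and <linux/err.h>.
 */
static int example_enable_vram_accounting(struct drm_device *dev,
					  struct ttm_resource_manager *man,
					  u64 vram_size)
{
	struct dmem_cgroup_region *region;

	region = drmm_cgroup_register_region(dev, "vram", vram_size);
	if (IS_ERR(region))
		return PTR_ERR(region);

	man->cg = region;	/* allocations from this manager are now charged */
	return 0;
}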
@@ -251,6 +258,8 @@ struct ttm_resource { struct ttm_bus_placement bus; struct ttm_buffer_object *bo; + struct dmem_cgroup_pool_state *css; + /** * @lru: Least recently used list, see &ttm_resource_manager.lru */ @@ -432,7 +441,8 @@ void ttm_resource_fini(struct ttm_resource_manager *man, int ttm_resource_alloc(struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_resource **res); + struct ttm_resource **res, + struct dmem_cgroup_pool_state **ret_limit_pool); void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res); bool ttm_resource_intersects(struct ttm_device *bdev, struct ttm_resource *res, diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h index e850dc3a1ad3..1b3e0176dcb7 100644 --- a/include/dt-bindings/arm/qcom,ids.h +++ b/include/dt-bindings/arm/qcom,ids.h @@ -284,6 +284,7 @@ #define QCOM_ID_QCS9100 667 #define QCOM_ID_QCS8300 674 #define QCOM_ID_QCS8275 675 +#define QCOM_ID_QCS9075 676 #define QCOM_ID_QCS615 680 /* diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h index 6ede88c3992d..f2a7b7d39c0d 100644 --- a/include/dt-bindings/clock/at91.h +++ b/include/dt-bindings/clock/at91.h @@ -42,6 +42,10 @@ #define PMC_PLLADIV2 (PMC_MAIN + 11) #define PMC_LVDSPLL (PMC_MAIN + 12) +/* SAMA7D65 */ +#define PMC_MCK3 (PMC_MAIN + 13) +#define PMC_MCK5 (PMC_MAIN + 14) + #ifndef AT91_PMC_MOSCS #define AT91_PMC_MOSCS 0 /* MOSCS Flag */ #define AT91_PMC_LOCKA 1 /* PLLA Lock */ @@ -55,4 +59,8 @@ #define AT91_PMC_GCKRDY 24 /* Generated Clocks */ #endif +/* Slow clock. */ +#define SCKC_MD_SLCK 0 +#define SCKC_TD_SLCK 1 + #endif diff --git a/include/dt-bindings/clock/en7523-clk.h b/include/dt-bindings/clock/en7523-clk.h index 717d23a5e5ae..edfa64045f52 100644 --- a/include/dt-bindings/clock/en7523-clk.h +++ b/include/dt-bindings/clock/en7523-clk.h @@ -12,6 +12,6 @@ #define EN7523_CLK_CRYPTO 6 #define EN7523_CLK_PCIE 7 -#define EN7523_NUM_CLOCKS 8 +#define EN7581_CLK_EMMC 8 #endif /* _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_ */ diff --git a/include/dt-bindings/clock/imx93-clock.h b/include/dt-bindings/clock/imx93-clock.h index 6c685067288b..c393fad3a346 100644 --- a/include/dt-bindings/clock/imx93-clock.h +++ b/include/dt-bindings/clock/imx93-clock.h @@ -209,5 +209,6 @@ #define IMX91_CLK_ENET2_REGULAR 204 #define IMX91_CLK_ENET2_REGULAR_GATE 205 #define IMX91_CLK_ENET1_QOS_TSN_GATE 206 +#define IMX93_CLK_SPDIF_IPG 207 #endif diff --git a/include/dt-bindings/clock/qcom,ipq-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h new file mode 100644 index 000000000000..936e92b3b62c --- /dev/null +++ b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H +#define _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H + +/* CMN PLL core clock. */ +#define CMN_PLL_CLK 0 + +/* The output clocks from CMN PLL of IPQ9574. 
*/ +#define XO_24MHZ_CLK 1 +#define SLEEP_32KHZ_CLK 2 +#define PCS_31P25MHZ_CLK 3 +#define NSS_1200MHZ_CLK 4 +#define PPE_353MHZ_CLK 5 +#define ETH0_50MHZ_CLK 6 +#define ETH1_50MHZ_CLK 7 +#define ETH2_50MHZ_CLK 8 +#define ETH_25MHZ_CLK 9 +#endif diff --git a/include/dt-bindings/clock/qcom,ipq5424-gcc.h b/include/dt-bindings/clock/qcom,ipq5424-gcc.h index 755ce7a71c7c..c15ad16923bd 100644 --- a/include/dt-bindings/clock/qcom,ipq5424-gcc.h +++ b/include/dt-bindings/clock/qcom,ipq5424-gcc.h @@ -12,7 +12,6 @@ #define GPLL2 2 #define GPLL2_OUT_MAIN 3 #define GCC_SLEEP_CLK_SRC 4 -#define GCC_APSS_DBG_CLK 5 #define GCC_USB0_EUD_AT_CLK 6 #define GCC_PCIE0_AXI_M_CLK_SRC 7 #define GCC_PCIE0_AXI_M_CLK 8 @@ -152,5 +151,6 @@ #define GCC_PCIE3_RCHNG_CLK_SRC 142 #define GCC_PCIE3_RCHNG_CLK 143 #define GCC_IM_SLEEP_CLK 144 +#define GCC_XO_CLK 145 #endif diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8960.h b/include/dt-bindings/clock/qcom,mmcc-msm8960.h index 81714fc859c5..717431d735c1 100644 --- a/include/dt-bindings/clock/qcom,mmcc-msm8960.h +++ b/include/dt-bindings/clock/qcom,mmcc-msm8960.h @@ -133,5 +133,7 @@ #define VCAP_CLK 124 #define VCAP_NPL_CLK 125 #define PLL15 126 +#define DSI2_PIXEL_LVDS_SRC 127 +#define LVDS_CLK 128 #endif diff --git a/include/dt-bindings/clock/qcom,qcs615-gcc.h b/include/dt-bindings/clock/qcom,qcs615-gcc.h new file mode 100644 index 000000000000..9704091636b8 --- /dev/null +++ b/include/dt-bindings/clock/qcom,qcs615-gcc.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCS615_H +#define _DT_BINDINGS_CLK_QCOM_GCC_QCS615_H + +/* GCC clocks */ +#define GPLL0_OUT_AUX2_DIV 0 +#define GPLL3_OUT_AUX2_DIV 1 +#define GPLL0 2 +#define GPLL3 3 +#define GPLL4 4 +#define GPLL6 5 +#define GPLL6_OUT_MAIN 6 +#define GPLL7 7 +#define GPLL8 8 +#define GPLL8_OUT_MAIN 9 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 10 +#define GCC_AGGRE_USB2_SEC_AXI_CLK 11 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 12 +#define GCC_AHB2PHY_EAST_CLK 13 +#define GCC_AHB2PHY_WEST_CLK 14 +#define GCC_BOOT_ROM_AHB_CLK 15 +#define GCC_CAMERA_AHB_CLK 16 +#define GCC_CAMERA_HF_AXI_CLK 17 +#define GCC_CAMERA_XO_CLK 18 +#define GCC_CE1_AHB_CLK 19 +#define GCC_CE1_AXI_CLK 20 +#define GCC_CE1_CLK 21 +#define GCC_CFG_NOC_USB2_SEC_AXI_CLK 22 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 23 +#define GCC_CPUSS_AHB_CLK 24 +#define GCC_CPUSS_AHB_CLK_SRC 25 +#define GCC_CPUSS_GNOC_CLK 26 +#define GCC_DDRSS_GPU_AXI_CLK 27 +#define GCC_DISP_AHB_CLK 28 +#define GCC_DISP_GPLL0_DIV_CLK_SRC 29 +#define GCC_DISP_HF_AXI_CLK 30 +#define GCC_DISP_XO_CLK 31 +#define GCC_EMAC_AXI_CLK 32 +#define GCC_EMAC_PTP_CLK 33 +#define GCC_EMAC_PTP_CLK_SRC 34 +#define GCC_EMAC_RGMII_CLK 35 +#define GCC_EMAC_RGMII_CLK_SRC 36 +#define GCC_EMAC_SLV_AHB_CLK 37 +#define GCC_GP1_CLK 38 +#define GCC_GP1_CLK_SRC 39 +#define GCC_GP2_CLK 40 +#define GCC_GP2_CLK_SRC 41 +#define GCC_GP3_CLK 42 +#define GCC_GP3_CLK_SRC 43 +#define GCC_GPU_CFG_AHB_CLK 44 +#define GCC_GPU_GPLL0_CLK_SRC 45 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 46 +#define GCC_GPU_IREF_CLK 47 +#define GCC_GPU_MEMNOC_GFX_CLK 48 +#define GCC_GPU_SNOC_DVM_GFX_CLK 49 +#define GCC_PCIE0_PHY_REFGEN_CLK 50 +#define GCC_PCIE_0_AUX_CLK 51 +#define GCC_PCIE_0_AUX_CLK_SRC 52 +#define GCC_PCIE_0_CFG_AHB_CLK 53 +#define GCC_PCIE_0_CLKREF_CLK 54 +#define GCC_PCIE_0_MSTR_AXI_CLK 55 +#define GCC_PCIE_0_PIPE_CLK 56 +#define GCC_PCIE_0_SLV_AXI_CLK 57 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 
58 +#define GCC_PCIE_PHY_AUX_CLK 59 +#define GCC_PCIE_PHY_REFGEN_CLK_SRC 60 +#define GCC_PDM2_CLK 61 +#define GCC_PDM2_CLK_SRC 62 +#define GCC_PDM_AHB_CLK 63 +#define GCC_PDM_XO4_CLK 64 +#define GCC_PRNG_AHB_CLK 65 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 66 +#define GCC_QMIP_DISP_AHB_CLK 67 +#define GCC_QMIP_PCIE_AHB_CLK 68 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 69 +#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 70 +#define GCC_QSPI_CORE_CLK 71 +#define GCC_QSPI_CORE_CLK_SRC 72 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 73 +#define GCC_QUPV3_WRAP0_CORE_CLK 74 +#define GCC_QUPV3_WRAP0_S0_CLK 75 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 76 +#define GCC_QUPV3_WRAP0_S1_CLK 77 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 78 +#define GCC_QUPV3_WRAP0_S2_CLK 79 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 80 +#define GCC_QUPV3_WRAP0_S3_CLK 81 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 82 +#define GCC_QUPV3_WRAP0_S4_CLK 83 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 84 +#define GCC_QUPV3_WRAP0_S5_CLK 85 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 86 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 87 +#define GCC_QUPV3_WRAP1_CORE_CLK 88 +#define GCC_QUPV3_WRAP1_S0_CLK 89 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 90 +#define GCC_QUPV3_WRAP1_S1_CLK 91 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 92 +#define GCC_QUPV3_WRAP1_S2_CLK 93 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 94 +#define GCC_QUPV3_WRAP1_S3_CLK 95 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 96 +#define GCC_QUPV3_WRAP1_S4_CLK 97 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 98 +#define GCC_QUPV3_WRAP1_S5_CLK 99 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 100 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 101 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 102 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 103 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 104 +#define GCC_RX1_USB2_CLKREF_CLK 105 +#define GCC_RX3_USB2_CLKREF_CLK 106 +#define GCC_SDCC1_AHB_CLK 107 +#define GCC_SDCC1_APPS_CLK 108 +#define GCC_SDCC1_APPS_CLK_SRC 109 +#define GCC_SDCC1_ICE_CORE_CLK 110 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 111 +#define GCC_SDCC2_AHB_CLK 112 +#define GCC_SDCC2_APPS_CLK 113 +#define GCC_SDCC2_APPS_CLK_SRC 114 +#define GCC_SDR_CORE_CLK 115 +#define GCC_SDR_CSR_HCLK 116 +#define GCC_SDR_PRI_MI2S_CLK 117 +#define GCC_SDR_SEC_MI2S_CLK 118 +#define GCC_SDR_WR0_MEM_CLK 119 +#define GCC_SDR_WR1_MEM_CLK 120 +#define GCC_SDR_WR2_MEM_CLK 121 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 122 +#define GCC_UFS_CARD_CLKREF_CLK 123 +#define GCC_UFS_MEM_CLKREF_CLK 124 +#define GCC_UFS_PHY_AHB_CLK 125 +#define GCC_UFS_PHY_AXI_CLK 126 +#define GCC_UFS_PHY_AXI_CLK_SRC 127 +#define GCC_UFS_PHY_ICE_CORE_CLK 128 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 129 +#define GCC_UFS_PHY_PHY_AUX_CLK 130 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 131 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 132 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 133 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 134 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 135 +#define GCC_USB20_SEC_MASTER_CLK 136 +#define GCC_USB20_SEC_MASTER_CLK_SRC 137 +#define GCC_USB20_SEC_MOCK_UTMI_CLK 138 +#define GCC_USB20_SEC_MOCK_UTMI_CLK_SRC 139 +#define GCC_USB20_SEC_SLEEP_CLK 140 +#define GCC_USB2_PRIM_CLKREF_CLK 141 +#define GCC_USB2_SEC_CLKREF_CLK 142 +#define GCC_USB2_SEC_PHY_AUX_CLK 143 +#define GCC_USB2_SEC_PHY_AUX_CLK_SRC 144 +#define GCC_USB2_SEC_PHY_COM_AUX_CLK 145 +#define GCC_USB2_SEC_PHY_PIPE_CLK 146 +#define GCC_USB30_PRIM_MASTER_CLK 147 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 148 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 149 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 150 +#define GCC_USB30_PRIM_SLEEP_CLK 151 +#define GCC_USB3_PRIM_CLKREF_CLK 152 +#define GCC_USB3_PRIM_PHY_AUX_CLK 153 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 
154 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 155 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 156 +#define GCC_USB3_SEC_CLKREF_CLK 157 +#define GCC_VIDEO_AHB_CLK 158 +#define GCC_VIDEO_AXI0_CLK 159 +#define GCC_VIDEO_XO_CLK 160 +#define GCC_VSENSOR_CLK_SRC 161 +#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 162 +#define GCC_UFS_PHY_AXI_HW_CTL_CLK 163 +#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 164 +#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 165 +#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 166 + +/* GCC Resets */ +#define GCC_EMAC_BCR 0 +#define GCC_QUSB2PHY_PRIM_BCR 1 +#define GCC_QUSB2PHY_SEC_BCR 2 +#define GCC_USB30_PRIM_BCR 3 +#define GCC_USB2_PHY_SEC_BCR 4 +#define GCC_USB3_DP_PHY_SEC_BCR 5 +#define GCC_USB3PHY_PHY_SEC_BCR 6 +#define GCC_PCIE_0_BCR 7 +#define GCC_PCIE_0_PHY_BCR 8 +#define GCC_PCIE_PHY_BCR 9 +#define GCC_PCIE_PHY_COM_BCR 10 +#define GCC_UFS_PHY_BCR 11 +#define GCC_USB20_SEC_BCR 12 +#define GCC_USB3_PHY_PRIM_SP0_BCR 13 +#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 14 +#define GCC_SDCC1_BCR 15 +#define GCC_SDCC2_BCR 16 + +/* GCC power domains */ +#define EMAC_GDSC 0 +#define PCIE_0_GDSC 1 +#define UFS_PHY_GDSC 2 +#define USB20_SEC_GDSC 3 +#define USB30_PRIM_GDSC 4 +#define HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC 5 +#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC 6 +#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC 7 +#define HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC 8 +#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 9 +#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 10 +#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 11 + +#endif diff --git a/include/dt-bindings/clock/qcom,sm6115-lpasscc.h b/include/dt-bindings/clock/qcom,sm6115-lpasscc.h new file mode 100644 index 000000000000..799274517c9a --- /dev/null +++ b/include/dt-bindings/clock/qcom,sm6115-lpasscc.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2023, Linaro Ltd. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_LPASSCC_SM6115_H +#define _DT_BINDINGS_CLK_QCOM_LPASSCC_SM6115_H + +/* LPASS CC */ +#define LPASS_SWR_TX_CONFIG_CGCR 0 + +/* LPASS_AUDIO CC */ +#define LPASS_AUDIO_SWR_RX_CGCR 0 + +#endif diff --git a/include/dt-bindings/clock/qcom,sm8750-dispcc.h b/include/dt-bindings/clock/qcom,sm8750-dispcc.h new file mode 100644 index 000000000000..dafb5069c96a --- /dev/null +++ b/include/dt-bindings/clock/qcom,sm8750-dispcc.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2022, The Linux Foundation. All rights reserved. + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2024, Linaro Ltd. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_SM8750_DISP_CC_H +#define _DT_BINDINGS_CLK_QCOM_SM8750_DISP_CC_H + +/* DISP_CC clocks */ +#define DISP_CC_ESYNC0_CLK 0 +#define DISP_CC_ESYNC0_CLK_SRC 1 +#define DISP_CC_ESYNC1_CLK 2 +#define DISP_CC_ESYNC1_CLK_SRC 3 +#define DISP_CC_MDSS_ACCU_SHIFT_CLK 4 +#define DISP_CC_MDSS_AHB1_CLK 5 +#define DISP_CC_MDSS_AHB_CLK 6 +#define DISP_CC_MDSS_AHB_CLK_SRC 7 +#define DISP_CC_MDSS_BYTE0_CLK 8 +#define DISP_CC_MDSS_BYTE0_CLK_SRC 9 +#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 10 +#define DISP_CC_MDSS_BYTE0_INTF_CLK 11 +#define DISP_CC_MDSS_BYTE1_CLK 12 +#define DISP_CC_MDSS_BYTE1_CLK_SRC 13 +#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 14 +#define DISP_CC_MDSS_BYTE1_INTF_CLK 15 +#define DISP_CC_MDSS_DPTX0_AUX_CLK 16 +#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 17 +#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 18 +#define DISP_CC_MDSS_DPTX0_LINK_CLK 19 +#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 20 +#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 21 +#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 22 +#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 23 +#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 24 +#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 25 +#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 26 +#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 27 +#define DISP_CC_MDSS_DPTX1_AUX_CLK 28 +#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 29 +#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 30 +#define DISP_CC_MDSS_DPTX1_LINK_CLK 31 +#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 32 +#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 33 +#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 34 +#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 35 +#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 36 +#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 37 +#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 38 +#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 39 +#define DISP_CC_MDSS_DPTX2_AUX_CLK 40 +#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 41 +#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 42 +#define DISP_CC_MDSS_DPTX2_LINK_CLK 43 +#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 44 +#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 45 +#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 46 +#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 47 +#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 48 +#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 49 +#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 50 +#define DISP_CC_MDSS_DPTX3_AUX_CLK 51 +#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 52 +#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 53 +#define DISP_CC_MDSS_DPTX3_LINK_CLK 54 +#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 55 +#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 56 +#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 57 +#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 58 +#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 59 +#define DISP_CC_MDSS_ESC0_CLK 60 +#define DISP_CC_MDSS_ESC0_CLK_SRC 61 +#define DISP_CC_MDSS_ESC1_CLK 62 +#define DISP_CC_MDSS_ESC1_CLK_SRC 63 +#define DISP_CC_MDSS_MDP1_CLK 64 +#define DISP_CC_MDSS_MDP_CLK 65 +#define DISP_CC_MDSS_MDP_CLK_SRC 66 +#define DISP_CC_MDSS_MDP_LUT1_CLK 67 +#define DISP_CC_MDSS_MDP_LUT_CLK 68 +#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 69 +#define DISP_CC_MDSS_PCLK0_CLK 70 +#define DISP_CC_MDSS_PCLK0_CLK_SRC 71 +#define DISP_CC_MDSS_PCLK1_CLK 72 +#define DISP_CC_MDSS_PCLK1_CLK_SRC 73 +#define DISP_CC_MDSS_PCLK2_CLK 74 +#define DISP_CC_MDSS_PCLK2_CLK_SRC 75 +#define DISP_CC_MDSS_RSCC_AHB_CLK 76 +#define DISP_CC_MDSS_RSCC_VSYNC_CLK 77 +#define DISP_CC_MDSS_VSYNC1_CLK 78 +#define DISP_CC_MDSS_VSYNC_CLK 79 +#define DISP_CC_MDSS_VSYNC_CLK_SRC 80 +#define DISP_CC_OSC_CLK 81 +#define DISP_CC_OSC_CLK_SRC 82 +#define DISP_CC_PLL0 83 +#define DISP_CC_PLL1 84 +#define DISP_CC_PLL2 85 
+#define DISP_CC_SLEEP_CLK 86 +#define DISP_CC_SLEEP_CLK_SRC 87 +#define DISP_CC_XO_CLK 88 +#define DISP_CC_XO_CLK_SRC 89 + +/* DISP_CC resets */ +#define DISP_CC_MDSS_CORE_BCR 0 +#define DISP_CC_MDSS_CORE_INT2_BCR 1 +#define DISP_CC_MDSS_RSCC_BCR 2 + +/* DISP_CC GDSCR */ +#define MDSS_GDSC 0 +#define MDSS_INT2_GDSC 1 + +#endif diff --git a/include/dt-bindings/clock/qcom,sm8750-gcc.h b/include/dt-bindings/clock/qcom,sm8750-gcc.h new file mode 100644 index 000000000000..e234595d7f42 --- /dev/null +++ b/include/dt-bindings/clock/qcom,sm8750-gcc.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8750_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SM8750_H + +/* GCC clocks */ +#define GCC_AGGRE_NOC_PCIE_AXI_CLK 0 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 1 +#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3 +#define GCC_BOOT_ROM_AHB_CLK 4 +#define GCC_CAM_BIST_MCLK_AHB_CLK 5 +#define GCC_CAMERA_AHB_CLK 6 +#define GCC_CAMERA_HF_AXI_CLK 7 +#define GCC_CAMERA_SF_AXI_CLK 8 +#define GCC_CAMERA_XO_CLK 9 +#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 10 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 11 +#define GCC_CNOC_PCIE_SF_AXI_CLK 12 +#define GCC_DDRSS_GPU_AXI_CLK 13 +#define GCC_DDRSS_PCIE_SF_QTB_CLK 14 +#define GCC_DISP_AHB_CLK 15 +#define GCC_DISP_HF_AXI_CLK 16 +#define GCC_EVA_AHB_CLK 17 +#define GCC_EVA_AXI0_CLK 18 +#define GCC_EVA_AXI0C_CLK 19 +#define GCC_EVA_XO_CLK 20 +#define GCC_GP1_CLK 21 +#define GCC_GP1_CLK_SRC 22 +#define GCC_GP2_CLK 23 +#define GCC_GP2_CLK_SRC 24 +#define GCC_GP3_CLK 25 +#define GCC_GP3_CLK_SRC 26 +#define GCC_GPLL0 27 +#define GCC_GPLL0_OUT_EVEN 28 +#define GCC_GPLL1 29 +#define GCC_GPLL4 30 +#define GCC_GPLL7 31 +#define GCC_GPLL9 32 +#define GCC_GPU_CFG_AHB_CLK 33 +#define GCC_GPU_GEMNOC_GFX_CLK 34 +#define GCC_GPU_GPLL0_CLK_SRC 35 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 36 +#define GCC_PCIE_0_AUX_CLK 37 +#define GCC_PCIE_0_AUX_CLK_SRC 38 +#define GCC_PCIE_0_CFG_AHB_CLK 39 +#define GCC_PCIE_0_MSTR_AXI_CLK 40 +#define GCC_PCIE_0_PHY_RCHNG_CLK 41 +#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 42 +#define GCC_PCIE_0_PIPE_CLK 43 +#define GCC_PCIE_0_PIPE_CLK_SRC 44 +#define GCC_PCIE_0_SLV_AXI_CLK 45 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 46 +#define GCC_PCIE_RSCC_CFG_AHB_CLK 47 +#define GCC_PCIE_RSCC_XO_CLK 48 +#define GCC_PDM2_CLK 49 +#define GCC_PDM2_CLK_SRC 50 +#define GCC_PDM_AHB_CLK 51 +#define GCC_PDM_XO4_CLK 52 +#define GCC_QMIP_CAMERA_CMD_AHB_CLK 53 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 54 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 55 +#define GCC_QMIP_GPU_AHB_CLK 56 +#define GCC_QMIP_PCIE_AHB_CLK 57 +#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 58 +#define GCC_QMIP_VIDEO_CVP_AHB_CLK 59 +#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 60 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 61 +#define GCC_QUPV3_I2C_CORE_CLK 62 +#define GCC_QUPV3_I2C_S0_CLK 63 +#define GCC_QUPV3_I2C_S0_CLK_SRC 64 +#define GCC_QUPV3_I2C_S1_CLK 65 +#define GCC_QUPV3_I2C_S1_CLK_SRC 66 +#define GCC_QUPV3_I2C_S2_CLK 67 +#define GCC_QUPV3_I2C_S2_CLK_SRC 68 +#define GCC_QUPV3_I2C_S3_CLK 69 +#define GCC_QUPV3_I2C_S3_CLK_SRC 70 +#define GCC_QUPV3_I2C_S4_CLK 71 +#define GCC_QUPV3_I2C_S4_CLK_SRC 72 +#define GCC_QUPV3_I2C_S5_CLK 73 +#define GCC_QUPV3_I2C_S5_CLK_SRC 74 +#define GCC_QUPV3_I2C_S6_CLK 75 +#define GCC_QUPV3_I2C_S6_CLK_SRC 76 +#define GCC_QUPV3_I2C_S7_CLK 77 +#define GCC_QUPV3_I2C_S7_CLK_SRC 78 +#define GCC_QUPV3_I2C_S8_CLK 79 +#define GCC_QUPV3_I2C_S8_CLK_SRC 80 +#define 
GCC_QUPV3_I2C_S9_CLK 81 +#define GCC_QUPV3_I2C_S9_CLK_SRC 82 +#define GCC_QUPV3_I2C_S_AHB_CLK 83 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 84 +#define GCC_QUPV3_WRAP1_CORE_CLK 85 +#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 86 +#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 87 +#define GCC_QUPV3_WRAP1_S0_CLK 88 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 89 +#define GCC_QUPV3_WRAP1_S1_CLK 90 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 91 +#define GCC_QUPV3_WRAP1_S2_CLK 92 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 93 +#define GCC_QUPV3_WRAP1_S3_CLK 94 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 95 +#define GCC_QUPV3_WRAP1_S4_CLK 96 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 97 +#define GCC_QUPV3_WRAP1_S5_CLK 98 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 99 +#define GCC_QUPV3_WRAP1_S6_CLK 100 +#define GCC_QUPV3_WRAP1_S6_CLK_SRC 101 +#define GCC_QUPV3_WRAP1_S7_CLK 102 +#define GCC_QUPV3_WRAP1_S7_CLK_SRC 103 +#define GCC_QUPV3_WRAP2_CORE_2X_CLK 104 +#define GCC_QUPV3_WRAP2_CORE_CLK 105 +#define GCC_QUPV3_WRAP2_IBI_CTRL_0_CLK_SRC 106 +#define GCC_QUPV3_WRAP2_IBI_CTRL_2_CLK 107 +#define GCC_QUPV3_WRAP2_IBI_CTRL_3_CLK 108 +#define GCC_QUPV3_WRAP2_S0_CLK 109 +#define GCC_QUPV3_WRAP2_S0_CLK_SRC 110 +#define GCC_QUPV3_WRAP2_S1_CLK 111 +#define GCC_QUPV3_WRAP2_S1_CLK_SRC 112 +#define GCC_QUPV3_WRAP2_S2_CLK 113 +#define GCC_QUPV3_WRAP2_S2_CLK_SRC 114 +#define GCC_QUPV3_WRAP2_S3_CLK 115 +#define GCC_QUPV3_WRAP2_S3_CLK_SRC 116 +#define GCC_QUPV3_WRAP2_S4_CLK 117 +#define GCC_QUPV3_WRAP2_S4_CLK_SRC 118 +#define GCC_QUPV3_WRAP2_S5_CLK 119 +#define GCC_QUPV3_WRAP2_S5_CLK_SRC 120 +#define GCC_QUPV3_WRAP2_S6_CLK 121 +#define GCC_QUPV3_WRAP2_S6_CLK_SRC 122 +#define GCC_QUPV3_WRAP2_S7_CLK 123 +#define GCC_QUPV3_WRAP2_S7_CLK_SRC 124 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 125 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 126 +#define GCC_QUPV3_WRAP_2_IBI_2_AHB_CLK 127 +#define GCC_QUPV3_WRAP_2_IBI_3_AHB_CLK 128 +#define GCC_QUPV3_WRAP_2_M_AHB_CLK 129 +#define GCC_QUPV3_WRAP_2_S_AHB_CLK 130 +#define GCC_SDCC2_AHB_CLK 131 +#define GCC_SDCC2_APPS_CLK 132 +#define GCC_SDCC2_APPS_CLK_SRC 133 +#define GCC_SDCC4_AHB_CLK 134 +#define GCC_SDCC4_APPS_CLK 135 +#define GCC_SDCC4_APPS_CLK_SRC 136 +#define GCC_UFS_PHY_AHB_CLK 137 +#define GCC_UFS_PHY_AXI_CLK 138 +#define GCC_UFS_PHY_AXI_CLK_SRC 139 +#define GCC_UFS_PHY_AXI_HW_CTL_CLK 140 +#define GCC_UFS_PHY_ICE_CORE_CLK 141 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 142 +#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 143 +#define GCC_UFS_PHY_PHY_AUX_CLK 144 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 145 +#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 146 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 147 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 148 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 149 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 150 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 151 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 152 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 153 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 154 +#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 155 +#define GCC_USB30_PRIM_MASTER_CLK 156 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 157 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 158 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 159 +#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 160 +#define GCC_USB30_PRIM_SLEEP_CLK 161 +#define GCC_USB3_PRIM_PHY_AUX_CLK 162 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 163 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 164 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 165 +#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 166 +#define GCC_VIDEO_AHB_CLK 167 +#define GCC_VIDEO_AXI0_CLK 168 +#define GCC_VIDEO_AXI1_CLK 169 +#define GCC_VIDEO_XO_CLK 170 + +/* GCC power domains */ +#define GCC_PCIE_0_GDSC 
0 +#define GCC_PCIE_0_PHY_GDSC 1 +#define GCC_UFS_MEM_PHY_GDSC 2 +#define GCC_UFS_PHY_GDSC 3 +#define GCC_USB30_PRIM_GDSC 4 +#define GCC_USB3_PHY_GDSC 5 + +/* GCC resets */ +#define GCC_CAMERA_BCR 0 +#define GCC_DISPLAY_BCR 1 +#define GCC_EVA_BCR 2 +#define GCC_GPU_BCR 3 +#define GCC_PCIE_0_BCR 4 +#define GCC_PCIE_0_LINK_DOWN_BCR 5 +#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 6 +#define GCC_PCIE_0_PHY_BCR 7 +#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 8 +#define GCC_PCIE_PHY_BCR 9 +#define GCC_PCIE_PHY_CFG_AHB_BCR 10 +#define GCC_PCIE_PHY_COM_BCR 11 +#define GCC_PCIE_RSCC_BCR 12 +#define GCC_PDM_BCR 13 +#define GCC_QUPV3_WRAPPER_1_BCR 14 +#define GCC_QUPV3_WRAPPER_2_BCR 15 +#define GCC_QUPV3_WRAPPER_I2C_BCR 16 +#define GCC_QUSB2PHY_PRIM_BCR 17 +#define GCC_QUSB2PHY_SEC_BCR 18 +#define GCC_SDCC2_BCR 19 +#define GCC_SDCC4_BCR 20 +#define GCC_UFS_PHY_BCR 21 +#define GCC_USB30_PRIM_BCR 22 +#define GCC_USB3_DP_PHY_PRIM_BCR 23 +#define GCC_USB3_DP_PHY_SEC_BCR 24 +#define GCC_USB3_PHY_PRIM_BCR 25 +#define GCC_USB3_PHY_SEC_BCR 26 +#define GCC_USB3PHY_PHY_PRIM_BCR 27 +#define GCC_USB3PHY_PHY_SEC_BCR 28 +#define GCC_VIDEO_AXI0_CLK_ARES 29 +#define GCC_VIDEO_AXI1_CLK_ARES 30 +#define GCC_VIDEO_BCR 31 +#define GCC_EVA_AXI0_CLK_ARES 32 +#define GCC_EVA_AXI0C_CLK_ARES 33 + +#endif diff --git a/include/dt-bindings/clock/qcom,sm8750-tcsr.h b/include/dt-bindings/clock/qcom,sm8750-tcsr.h new file mode 100644 index 000000000000..1c502ac7c7f4 --- /dev/null +++ b/include/dt-bindings/clock/qcom,sm8750-tcsr.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8750_H +#define _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8750_H + +/* TCSR_CC clocks */ +#define TCSR_PCIE_0_CLKREF_EN 0 +#define TCSR_UFS_CLKREF_EN 1 +#define TCSR_USB2_CLKREF_EN 2 +#define TCSR_USB3_CLKREF_EN 3 + +#endif diff --git a/include/dt-bindings/clock/qcom,x1e80100-gpucc.h b/include/dt-bindings/clock/qcom,x1e80100-gpucc.h index 61a3a8f3ac43..27b8f50541fd 100644 --- a/include/dt-bindings/clock/qcom,x1e80100-gpucc.h +++ b/include/dt-bindings/clock/qcom,x1e80100-gpucc.h @@ -33,9 +33,22 @@ #define GPU_CC_SLEEP_CLK 23 #define GPU_CC_XO_CLK_SRC 24 #define GPU_CC_XO_DIV_CLK_SRC 25 +#define GPU_CC_CX_ACCU_SHIFT_CLK 26 +#define GPU_CC_GX_ACCU_SHIFT_CLK 27 /* GDSCs */ #define GPU_CX_GDSC 0 #define GPU_GX_GDSC 1 +/* GPU_CC resets */ +#define GPU_CC_ACD_BCR 0 +#define GPU_CC_CB_BCR 1 +#define GPU_CC_CX_BCR 2 +#define GPU_CC_FAST_HUB_BCR 3 +#define GPU_CC_FF_BCR 4 +#define GPU_CC_GFX3D_AON_BCR 5 +#define GPU_CC_GMU_BCR 6 +#define GPU_CC_GX_BCR 7 +#define GPU_CC_XO_BCR 8 + #endif diff --git a/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h b/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h index 67774eafad06..4cc8fc34b23c 100644 --- a/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h +++ b/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h @@ -2,12 +2,12 @@ * * Copyright (C) 2024 Renesas Electronics Corp. 
*/ -#ifndef __DT_BINDINGS_CLOCK_R9A08G045_VBATTB_H__ -#define __DT_BINDINGS_CLOCK_R9A08G045_VBATTB_H__ +#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A08G045_VBATTB_H__ +#define __DT_BINDINGS_CLOCK_RENESAS_R9A08G045_VBATTB_H__ #define VBATTB_XC 0 #define VBATTB_XBYP 1 #define VBATTB_MUX 2 #define VBATTB_VBATTCLK 3 -#endif /* __DT_BINDINGS_CLOCK_R9A08G045_VBATTB_H__ */ +#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A08G045_VBATTB_H__ */ diff --git a/include/dt-bindings/clock/renesas,r9a09g047-cpg.h b/include/dt-bindings/clock/renesas,r9a09g047-cpg.h new file mode 100644 index 000000000000..1d031bf6bf03 --- /dev/null +++ b/include/dt-bindings/clock/renesas,r9a09g047-cpg.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + * + * Copyright (C) 2024 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G047_CPG_H__ +#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G047_CPG_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* Core Clock list */ +#define R9A09G047_SYS_0_PCLK 0 +#define R9A09G047_CA55_0_CORECLK0 1 +#define R9A09G047_CA55_0_CORECLK1 2 +#define R9A09G047_CA55_0_CORECLK2 3 +#define R9A09G047_CA55_0_CORECLK3 4 +#define R9A09G047_CA55_0_PERIPHCLK 5 +#define R9A09G047_CM33_CLK0 6 +#define R9A09G047_CST_0_SWCLKTCK 7 +#define R9A09G047_IOTOP_0_SHCLK 8 + +#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G047_CPG_H__ */ diff --git a/include/dt-bindings/clock/samsung,exynos990.h b/include/dt-bindings/clock/samsung,exynos990.h new file mode 100644 index 000000000000..307215a3f3ed --- /dev/null +++ b/include/dt-bindings/clock/samsung,exynos990.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) 2024 Igor Belwon <igor.belwon@mentallysanemainliners.org> + * + * Device Tree binding constants for Exynos990 clock controller. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_EXYNOS_990_H +#define _DT_BINDINGS_CLOCK_EXYNOS_990_H + +/* CMU_TOP */ +#define CLK_FOUT_SHARED0_PLL 1 +#define CLK_FOUT_SHARED1_PLL 2 +#define CLK_FOUT_SHARED2_PLL 3 +#define CLK_FOUT_SHARED3_PLL 4 +#define CLK_FOUT_SHARED4_PLL 5 +#define CLK_FOUT_G3D_PLL 6 +#define CLK_FOUT_MMC_PLL 7 +#define CLK_MOUT_PLL_SHARED0 8 +#define CLK_MOUT_PLL_SHARED1 9 +#define CLK_MOUT_PLL_SHARED2 10 +#define CLK_MOUT_PLL_SHARED3 11 +#define CLK_MOUT_PLL_SHARED4 12 +#define CLK_MOUT_PLL_MMC 13 +#define CLK_MOUT_PLL_G3D 14 +#define CLK_MOUT_CMU_APM_BUS 15 +#define CLK_MOUT_CMU_AUD_CPU 16 +#define CLK_MOUT_CMU_BUS0_BUS 17 +#define CLK_MOUT_CMU_BUS1_BUS 18 +#define CLK_MOUT_CMU_BUS1_SSS 19 +#define CLK_MOUT_CMU_CIS_CLK0 20 +#define CLK_MOUT_CMU_CIS_CLK1 21 +#define CLK_MOUT_CMU_CIS_CLK2 22 +#define CLK_MOUT_CMU_CIS_CLK3 23 +#define CLK_MOUT_CMU_CIS_CLK4 24 +#define CLK_MOUT_CMU_CIS_CLK5 25 +#define CLK_MOUT_CMU_CMU_BOOST 26 +#define CLK_MOUT_CMU_CORE_BUS 27 +#define CLK_MOUT_CMU_CPUCL0_DBG_BUS 28 +#define CLK_MOUT_CMU_CPUCL0_SWITCH 29 +#define CLK_MOUT_CMU_CPUCL1_SWITCH 30 +#define CLK_MOUT_CMU_CPUCL2_BUSP 31 +#define CLK_MOUT_CMU_CPUCL2_SWITCH 32 +#define CLK_MOUT_CMU_CSIS_BUS 33 +#define CLK_MOUT_CMU_CSIS_OIS_MCU 34 +#define CLK_MOUT_CMU_DNC_BUS 35 +#define CLK_MOUT_CMU_DNC_BUSM 36 +#define CLK_MOUT_CMU_DNS_BUS 37 +#define CLK_MOUT_CMU_DPU 38 +#define CLK_MOUT_CMU_DPU_ALT 39 +#define CLK_MOUT_CMU_DSP_BUS 40 +#define CLK_MOUT_CMU_G2D_G2D 41 +#define CLK_MOUT_CMU_G2D_MSCL 42 +#define CLK_MOUT_CMU_HPM 43 +#define CLK_MOUT_CMU_HSI0_BUS 44 +#define CLK_MOUT_CMU_HSI0_DPGTC 45 +#define CLK_MOUT_CMU_HSI0_USB31DRD 46 +#define CLK_MOUT_CMU_HSI0_USBDP_DEBUG 47 +#define CLK_MOUT_CMU_HSI1_BUS 48 +#define CLK_MOUT_CMU_HSI1_MMC_CARD 49 +#define CLK_MOUT_CMU_HSI1_PCIE 50 +#define CLK_MOUT_CMU_HSI1_UFS_CARD 51 +#define CLK_MOUT_CMU_HSI1_UFS_EMBD 52 +#define CLK_MOUT_CMU_HSI2_BUS 53 +#define CLK_MOUT_CMU_HSI2_PCIE 54 +#define CLK_MOUT_CMU_IPP_BUS 55 +#define CLK_MOUT_CMU_ITP_BUS 56 +#define CLK_MOUT_CMU_MCSC_BUS 57 +#define CLK_MOUT_CMU_MCSC_GDC 58 +#define CLK_MOUT_CMU_CMU_BOOST_CPU 59 +#define CLK_MOUT_CMU_MFC0_MFC0 60 +#define CLK_MOUT_CMU_MFC0_WFD 61 +#define CLK_MOUT_CMU_MIF_BUSP 62 +#define CLK_MOUT_CMU_MIF_SWITCH 63 +#define CLK_MOUT_CMU_NPU_BUS 64 +#define CLK_MOUT_CMU_PERIC0_BUS 65 +#define CLK_MOUT_CMU_PERIC0_IP 66 +#define CLK_MOUT_CMU_PERIC1_BUS 67 +#define CLK_MOUT_CMU_PERIC1_IP 68 +#define CLK_MOUT_CMU_PERIS_BUS 69 +#define CLK_MOUT_CMU_SSP_BUS 70 +#define CLK_MOUT_CMU_TNR_BUS 71 +#define CLK_MOUT_CMU_VRA_BUS 72 +#define CLK_DOUT_CMU_APM_BUS 73 +#define CLK_DOUT_CMU_AUD_CPU 74 +#define CLK_DOUT_CMU_BUS0_BUS 75 +#define CLK_DOUT_CMU_BUS1_BUS 76 +#define CLK_DOUT_CMU_BUS1_SSS 77 +#define CLK_DOUT_CMU_CIS_CLK0 78 +#define CLK_DOUT_CMU_CIS_CLK1 79 +#define CLK_DOUT_CMU_CIS_CLK2 80 +#define CLK_DOUT_CMU_CIS_CLK3 81 +#define CLK_DOUT_CMU_CIS_CLK4 82 +#define CLK_DOUT_CMU_CIS_CLK5 83 +#define CLK_DOUT_CMU_CMU_BOOST 84 +#define CLK_DOUT_CMU_CORE_BUS 85 +#define CLK_DOUT_CMU_CPUCL0_DBG_BUS 86 +#define CLK_DOUT_CMU_CPUCL0_SWITCH 87 +#define CLK_DOUT_CMU_CPUCL1_SWITCH 88 +#define CLK_DOUT_CMU_CPUCL2_BUSP 89 +#define CLK_DOUT_CMU_CPUCL2_SWITCH 90 +#define CLK_DOUT_CMU_CSIS_BUS 91 +#define CLK_DOUT_CMU_CSIS_OIS_MCU 92 +#define CLK_DOUT_CMU_DNC_BUS 93 +#define CLK_DOUT_CMU_DNC_BUSM 94 +#define CLK_DOUT_CMU_DNS_BUS 95 +#define CLK_DOUT_CMU_DSP_BUS 96 +#define CLK_DOUT_CMU_G2D_G2D 97 +#define CLK_DOUT_CMU_G2D_MSCL 98 +#define CLK_DOUT_CMU_G3D_SWITCH 99 +#define CLK_DOUT_CMU_HPM 100 +#define 
CLK_DOUT_CMU_HSI0_BUS 101 +#define CLK_DOUT_CMU_HSI0_DPGTC 102 +#define CLK_DOUT_CMU_HSI0_USB31DRD 103 +#define CLK_DOUT_CMU_HSI0_USBDP_DEBUG 104 +#define CLK_DOUT_CMU_HSI1_BUS 105 +#define CLK_DOUT_CMU_HSI1_MMC_CARD 106 +#define CLK_DOUT_CMU_HSI1_PCIE 107 +#define CLK_DOUT_CMU_HSI1_UFS_CARD 108 +#define CLK_DOUT_CMU_HSI1_UFS_EMBD 109 +#define CLK_DOUT_CMU_HSI2_BUS 110 +#define CLK_DOUT_CMU_HSI2_PCIE 111 +#define CLK_DOUT_CMU_IPP_BUS 112 +#define CLK_DOUT_CMU_ITP_BUS 113 +#define CLK_DOUT_CMU_MCSC_BUS 114 +#define CLK_DOUT_CMU_MCSC_GDC 115 +#define CLK_DOUT_CMU_CMU_BOOST_CPU 116 +#define CLK_DOUT_CMU_MFC0_MFC0 117 +#define CLK_DOUT_CMU_MFC0_WFD 118 +#define CLK_DOUT_CMU_MIF_BUSP 119 +#define CLK_DOUT_CMU_NPU_BUS 120 +#define CLK_DOUT_CMU_OTP 121 +#define CLK_DOUT_CMU_PERIC0_BUS 122 +#define CLK_DOUT_CMU_PERIC0_IP 123 +#define CLK_DOUT_CMU_PERIC1_BUS 124 +#define CLK_DOUT_CMU_PERIC1_IP 125 +#define CLK_DOUT_CMU_PERIS_BUS 126 +#define CLK_DOUT_CMU_SSP_BUS 127 +#define CLK_DOUT_CMU_TNR_BUS 128 +#define CLK_DOUT_CMU_VRA_BUS 129 +#define CLK_DOUT_CMU_DPU 130 +#define CLK_DOUT_CMU_DPU_ALT 131 +#define CLK_DOUT_CMU_SHARED0_DIV2 132 +#define CLK_DOUT_CMU_SHARED0_DIV3 133 +#define CLK_DOUT_CMU_SHARED0_DIV4 134 +#define CLK_DOUT_CMU_SHARED1_DIV2 135 +#define CLK_DOUT_CMU_SHARED1_DIV3 136 +#define CLK_DOUT_CMU_SHARED1_DIV4 137 +#define CLK_DOUT_CMU_SHARED2_DIV2 138 +#define CLK_DOUT_CMU_SHARED4_DIV2 139 +#define CLK_DOUT_CMU_SHARED4_DIV3 140 +#define CLK_DOUT_CMU_SHARED4_DIV4 141 +#define CLK_GOUT_CMU_G3D_BUS 142 +#define CLK_GOUT_CMU_MIF_SWITCH 143 +#define CLK_GOUT_CMU_APM_BUS 144 +#define CLK_GOUT_CMU_AUD_CPU 145 +#define CLK_GOUT_CMU_BUS0_BUS 146 +#define CLK_GOUT_CMU_BUS1_BUS 147 +#define CLK_GOUT_CMU_BUS1_SSS 148 +#define CLK_GOUT_CMU_CIS_CLK0 149 +#define CLK_GOUT_CMU_CIS_CLK1 150 +#define CLK_GOUT_CMU_CIS_CLK2 151 +#define CLK_GOUT_CMU_CIS_CLK3 152 +#define CLK_GOUT_CMU_CIS_CLK4 153 +#define CLK_GOUT_CMU_CIS_CLK5 154 +#define CLK_GOUT_CMU_CORE_BUS 155 +#define CLK_GOUT_CMU_CPUCL0_DBG_BUS 156 +#define CLK_GOUT_CMU_CPUCL0_SWITCH 157 +#define CLK_GOUT_CMU_CPUCL1_SWITCH 158 +#define CLK_GOUT_CMU_CPUCL2_BUSP 159 +#define CLK_GOUT_CMU_CPUCL2_SWITCH 160 +#define CLK_GOUT_CMU_CSIS_BUS 161 +#define CLK_GOUT_CMU_CSIS_OIS_MCU 162 +#define CLK_GOUT_CMU_DNC_BUS 163 +#define CLK_GOUT_CMU_DNC_BUSM 164 +#define CLK_GOUT_CMU_DNS_BUS 165 +#define CLK_GOUT_CMU_DPU 166 +#define CLK_GOUT_CMU_DPU_BUS 167 +#define CLK_GOUT_CMU_DSP_BUS 168 +#define CLK_GOUT_CMU_G2D_G2D 169 +#define CLK_GOUT_CMU_G2D_MSCL 170 +#define CLK_GOUT_CMU_G3D_SWITCH 171 +#define CLK_GOUT_CMU_HPM 172 +#define CLK_GOUT_CMU_HSI0_BUS 173 +#define CLK_GOUT_CMU_HSI0_DPGTC 174 +#define CLK_GOUT_CMU_HSI0_USB31DRD 175 +#define CLK_GOUT_CMU_HSI0_USBDP_DEBUG 176 +#define CLK_GOUT_CMU_HSI1_BUS 177 +#define CLK_GOUT_CMU_HSI1_MMC_CARD 178 +#define CLK_GOUT_CMU_HSI1_PCIE 179 +#define CLK_GOUT_CMU_HSI1_UFS_CARD 180 +#define CLK_GOUT_CMU_HSI1_UFS_EMBD 181 +#define CLK_GOUT_CMU_HSI2_BUS 182 +#define CLK_GOUT_CMU_HSI2_PCIE 183 +#define CLK_GOUT_CMU_IPP_BUS 184 +#define CLK_GOUT_CMU_ITP_BUS 185 +#define CLK_GOUT_CMU_MCSC_BUS 186 +#define CLK_GOUT_CMU_MCSC_GDC 187 +#define CLK_GOUT_CMU_MFC0_MFC0 188 +#define CLK_GOUT_CMU_MFC0_WFD 189 +#define CLK_GOUT_CMU_MIF_BUSP 190 +#define CLK_GOUT_CMU_NPU_BUS 191 +#define CLK_GOUT_CMU_PERIC0_BUS 192 +#define CLK_GOUT_CMU_PERIC0_IP 193 +#define CLK_GOUT_CMU_PERIC1_BUS 194 +#define CLK_GOUT_CMU_PERIC1_IP 195 +#define CLK_GOUT_CMU_PERIS_BUS 196 +#define CLK_GOUT_CMU_SSP_BUS 197 +#define CLK_GOUT_CMU_TNR_BUS 198 +#define 
CLK_GOUT_CMU_VRA_BUS 199 + +/* CMU_HSI0 */ +#define CLK_MOUT_HSI0_BUS_USER 1 +#define CLK_MOUT_HSI0_USB31DRD_USER 2 +#define CLK_MOUT_HSI0_USBDP_DEBUG_USER 3 +#define CLK_MOUT_HSI0_DPGTC_USER 4 +#define CLK_GOUT_HSI0_DP_LINK_DP_GTC_CLK 5 +#define CLK_GOUT_HSI0_DP_LINK_PCLK 6 +#define CLK_GOUT_HSI0_D_TZPC_HSI0_PCLK 7 +#define CLK_GOUT_HSI0_LHM_AXI_P_HSI0_CLK 8 +#define CLK_GOUT_HSI0_PPMU_HSI0_BUS1_ACLK 9 +#define CLK_GOUT_HSI0_PPMU_HSI0_BUS1_PCLK 10 +#define CLK_GOUT_HSI0_CLK_HSI0_BUS_CLK 11 +#define CLK_GOUT_HSI0_SYSMMU_USB_CLK_S2 12 +#define CLK_GOUT_HSI0_SYSREG_HSI0_PCLK 13 +#define CLK_GOUT_HSI0_USB31DRD_ACLK_PHYCTRL 14 +#define CLK_GOUT_HSI0_USB31DRD_BUS_CLK_EARLY 15 +#define CLK_GOUT_HSI0_USB31DRD_USB31DRD_REF_CLK_40 16 +#define CLK_GOUT_HSI0_USB31DRD_USBDPPHY_REF_SOC_PLL 17 +#define CLK_GOUT_HSI0_USB31DRD_USBDPPHY_SCL_APB 18 +#define CLK_GOUT_HSI0_USB31DRD_USBPCS_APB_CLK 19 +#define CLK_GOUT_HSI0_VGEN_LITE_HSI0_CLK 20 +#define CLK_GOUT_HSI0_CMU_HSI0_PCLK 21 +#define CLK_GOUT_HSI0_XIU_D_HSI0_ACLK 22 + +#endif diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h index e5dad050d518..b6ff9c68cb3f 100644 --- a/include/dt-bindings/clock/stm32fx-clock.h +++ b/include/dt-bindings/clock/stm32fx-clock.h @@ -10,7 +10,7 @@ * List of clocks which are not derived from system clock (SYSCLOCK) * * The index of these clocks is the secondary index of DT bindings - * (see Documentation/devicetree/bindings/clock/st,stm32-rcc.txt) + * (see Documentation/devicetree/bindings/clock/st,stm32-rcc.yaml) * * e.g: <assigned-clocks = <&rcc 1 CLK_LSE>; diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h index 175892189e9d..4f220ea7a23c 100644 --- a/include/dt-bindings/clock/sun50i-a64-ccu.h +++ b/include/dt-bindings/clock/sun50i-a64-ccu.h @@ -44,7 +44,9 @@ #define _DT_BINDINGS_CLK_SUN50I_A64_H_ #define CLK_PLL_VIDEO0 7 +#define CLK_PLL_VIDEO0_2X 8 #define CLK_PLL_PERIPH0 11 +#define CLK_PLL_MIPI 17 #define CLK_CPUX 21 #define CLK_BUS_MIPI_DSI 28 diff --git a/include/dt-bindings/iio/adi,ad4695.h b/include/dt-bindings/iio/adc/adi,ad4695.h index 9fbef542bf67..9fbef542bf67 100644 --- a/include/dt-bindings/iio/adi,ad4695.h +++ b/include/dt-bindings/iio/adc/adi,ad4695.h diff --git a/include/dt-bindings/interconnect/qcom,ipq5424.h b/include/dt-bindings/interconnect/qcom,ipq5424.h new file mode 100644 index 000000000000..a770356112ee --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,ipq5424.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +#ifndef INTERCONNECT_QCOM_IPQ5424_H +#define INTERCONNECT_QCOM_IPQ5424_H + +#define MASTER_ANOC_PCIE0 0 +#define SLAVE_ANOC_PCIE0 1 +#define MASTER_CNOC_PCIE0 2 +#define SLAVE_CNOC_PCIE0 3 +#define MASTER_ANOC_PCIE1 4 +#define SLAVE_ANOC_PCIE1 5 +#define MASTER_CNOC_PCIE1 6 +#define SLAVE_CNOC_PCIE1 7 +#define MASTER_ANOC_PCIE2 8 +#define SLAVE_ANOC_PCIE2 9 +#define MASTER_CNOC_PCIE2 10 +#define SLAVE_CNOC_PCIE2 11 +#define MASTER_ANOC_PCIE3 12 +#define SLAVE_ANOC_PCIE3 13 +#define MASTER_CNOC_PCIE3 14 +#define SLAVE_CNOC_PCIE3 15 +#define MASTER_CNOC_USB 16 +#define SLAVE_CNOC_USB 17 + +#endif /* INTERCONNECT_QCOM_IPQ5424_H */ diff --git a/include/dt-bindings/interconnect/qcom,sm8750-rpmh.h b/include/dt-bindings/interconnect/qcom,sm8750-rpmh.h new file mode 100644 index 000000000000..30563952a646 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sm8750-rpmh.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + 
* Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8750_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8750_H + +#define MASTER_QSPI_0 0 +#define MASTER_QUP_1 1 +#define MASTER_QUP_3 2 +#define MASTER_SDCC_4 3 +#define MASTER_UFS_MEM 4 +#define MASTER_USB3_0 5 +#define SLAVE_A1NOC_SNOC 6 + +#define MASTER_QDSS_BAM 0 +#define MASTER_QUP_2 1 +#define MASTER_CRYPTO 2 +#define MASTER_IPA 3 +#define MASTER_SOCCP_AGGR_NOC 4 +#define MASTER_SP 5 +#define MASTER_QDSS_ETR 6 +#define MASTER_QDSS_ETR_1 7 +#define MASTER_SDCC_2 8 +#define SLAVE_A2NOC_SNOC 9 + +#define MASTER_QUP_CORE_0 0 +#define MASTER_QUP_CORE_1 1 +#define MASTER_QUP_CORE_2 2 +#define SLAVE_QUP_CORE_0 3 +#define SLAVE_QUP_CORE_1 4 +#define SLAVE_QUP_CORE_2 5 + +#define MASTER_CNOC_CFG 0 +#define SLAVE_AHB2PHY_SOUTH 1 +#define SLAVE_AHB2PHY_NORTH 2 +#define SLAVE_CAMERA_CFG 3 +#define SLAVE_CLK_CTL 4 +#define SLAVE_CRYPTO_0_CFG 5 +#define SLAVE_DISPLAY_CFG 6 +#define SLAVE_EVA_CFG 7 +#define SLAVE_GFX3D_CFG 8 +#define SLAVE_I2C 9 +#define SLAVE_I3C_IBI0_CFG 10 +#define SLAVE_I3C_IBI1_CFG 11 +#define SLAVE_IMEM_CFG 12 +#define SLAVE_CNOC_MSS 13 +#define SLAVE_PCIE_CFG 14 +#define SLAVE_PRNG 15 +#define SLAVE_QDSS_CFG 16 +#define SLAVE_QSPI_0 17 +#define SLAVE_QUP_3 18 +#define SLAVE_QUP_1 19 +#define SLAVE_QUP_2 20 +#define SLAVE_SDCC_2 21 +#define SLAVE_SDCC_4 22 +#define SLAVE_SPSS_CFG 23 +#define SLAVE_TCSR 24 +#define SLAVE_TLMM 25 +#define SLAVE_UFS_MEM_CFG 26 +#define SLAVE_USB3_0 27 +#define SLAVE_VENUS_CFG 28 +#define SLAVE_VSENSE_CTRL_CFG 29 +#define SLAVE_CNOC_MNOC_CFG 30 +#define SLAVE_PCIE_ANOC_CFG 31 +#define SLAVE_QDSS_STM 32 +#define SLAVE_TCU 33 + +#define MASTER_GEM_NOC_CNOC 0 +#define MASTER_GEM_NOC_PCIE_SNOC 1 +#define SLAVE_AOSS 2 +#define SLAVE_IPA_CFG 3 +#define SLAVE_IPC_ROUTER_CFG 4 +#define SLAVE_SOCCP 5 +#define SLAVE_TME_CFG 6 +#define SLAVE_APPSS 7 +#define SLAVE_CNOC_CFG 8 +#define SLAVE_DDRSS_CFG 9 +#define SLAVE_BOOT_IMEM 10 +#define SLAVE_IMEM 11 +#define SLAVE_BOOT_IMEM_2 12 +#define SLAVE_SERVICE_CNOC 13 +#define SLAVE_PCIE_0 14 + +#define MASTER_GPU_TCU 0 +#define MASTER_SYS_TCU 1 +#define MASTER_APPSS_PROC 2 +#define MASTER_GFX3D 3 +#define MASTER_LPASS_GEM_NOC 4 +#define MASTER_MSS_PROC 5 +#define MASTER_MNOC_HF_MEM_NOC 6 +#define MASTER_MNOC_SF_MEM_NOC 7 +#define MASTER_COMPUTE_NOC 8 +#define MASTER_ANOC_PCIE_GEM_NOC 9 +#define MASTER_SNOC_SF_MEM_NOC 10 +#define MASTER_UBWC_P 11 +#define MASTER_GIC 12 +#define SLAVE_UBWC_P 13 +#define SLAVE_GEM_NOC_CNOC 14 +#define SLAVE_LLCC 15 +#define SLAVE_MEM_NOC_PCIE_SNOC 16 + +#define MASTER_LPIAON_NOC 0 +#define SLAVE_LPASS_GEM_NOC 1 + +#define MASTER_LPASS_LPINOC 0 +#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1 + +#define MASTER_LPASS_PROC 0 +#define SLAVE_LPICX_NOC_LPIAON_NOC 1 + +#define MASTER_LLCC 0 +#define SLAVE_EBI1 1 + +#define MASTER_CAMNOC_HF 0 +#define MASTER_CAMNOC_NRT_ICP_SF 1 +#define MASTER_CAMNOC_RT_CDM_SF 2 +#define MASTER_CAMNOC_SF 3 +#define MASTER_MDP 4 +#define MASTER_CDSP_HCP 5 +#define MASTER_VIDEO_CV_PROC 6 +#define MASTER_VIDEO_EVA 7 +#define MASTER_VIDEO_MVP 8 +#define MASTER_VIDEO_V_PROC 9 +#define MASTER_CNOC_MNOC_CFG 10 +#define SLAVE_MNOC_HF_MEM_NOC 11 +#define SLAVE_MNOC_SF_MEM_NOC 12 +#define SLAVE_SERVICE_MNOC 13 + +#define MASTER_CDSP_PROC 0 +#define SLAVE_CDSP_MEM_NOC 1 + +#define MASTER_PCIE_ANOC_CFG 0 +#define MASTER_PCIE_0 1 +#define SLAVE_ANOC_PCIE_GEM_NOC 2 +#define SLAVE_SERVICE_PCIE_ANOC 3 + +#define MASTER_A1NOC_SNOC 0 +#define MASTER_A2NOC_SNOC 1 
+#define SLAVE_SNOC_GEM_NOC_SF 2 + +#endif diff --git a/include/dt-bindings/media/video-interfaces.h b/include/dt-bindings/media/video-interfaces.h index 68ac4e05e37f..88b9d05d8075 100644 --- a/include/dt-bindings/media/video-interfaces.h +++ b/include/dt-bindings/media/video-interfaces.h @@ -13,4 +13,11 @@ #define MEDIA_BUS_TYPE_PARALLEL 5 #define MEDIA_BUS_TYPE_BT656 6 +#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ABC 0 +#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ACB 1 +#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_BAC 2 +#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_BCA 3 +#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_CAB 4 +#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_CBA 5 + #endif /* __DT_BINDINGS_MEDIA_VIDEO_INTERFACES_H__ */ diff --git a/include/dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h b/include/dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h new file mode 100644 index 000000000000..5917096720bd --- /dev/null +++ b/include/dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * This header provides constants for Renesas RZ/G3E family pinctrl bindings. + * + * Copyright (C) 2024 Renesas Electronics Corp. + * + */ + +#ifndef __DT_BINDINGS_PINCTRL_RENESAS_R9A09G047_PINCTRL_H__ +#define __DT_BINDINGS_PINCTRL_RENESAS_R9A09G047_PINCTRL_H__ + +#include <dt-bindings/pinctrl/rzg2l-pinctrl.h> + +/* RZG3E_Px = Offset address of PFC_P_mn - 0x20 */ +#define RZG3E_P0 0 +#define RZG3E_P1 1 +#define RZG3E_P2 2 +#define RZG3E_P3 3 +#define RZG3E_P4 4 +#define RZG3E_P5 5 +#define RZG3E_P6 6 +#define RZG3E_P7 7 +#define RZG3E_P8 8 +#define RZG3E_PA 10 +#define RZG3E_PB 11 +#define RZG3E_PC 12 +#define RZG3E_PD 13 +#define RZG3E_PE 14 +#define RZG3E_PF 15 +#define RZG3E_PG 16 +#define RZG3E_PH 17 +#define RZG3E_PJ 19 +#define RZG3E_PK 20 +#define RZG3E_PL 21 +#define RZG3E_PM 22 +#define RZG3E_PS 28 + +#define RZG3E_PORT_PINMUX(b, p, f) RZG2L_PORT_PINMUX(RZG3E_P##b, p, f) +#define RZG3E_GPIO(port, pin) RZG2L_GPIO(RZG3E_P##port, pin) + +#endif /* __DT_BINDINGS_PINCTRL_RENESAS_R9A09G047_PINCTRL_H__ */ diff --git a/include/dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h b/include/dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h new file mode 100644 index 000000000000..2e83bf43160b --- /dev/null +++ b/include/dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * This header provides constants for Renesas RZ/V2H family pinctrl bindings. + * + * Copyright (C) 2024 Renesas Electronics Corp. 
+ * + */ + +#ifndef __DT_BINDINGS_PINCTRL_RENESAS_R9A09G057_PINCTRL_H__ +#define __DT_BINDINGS_PINCTRL_RENESAS_R9A09G057_PINCTRL_H__ + +#include <dt-bindings/pinctrl/rzg2l-pinctrl.h> + +/* RZV2H_Px = Offset address of PFC_P_mn - 0x20 */ +#define RZV2H_P0 0 +#define RZV2H_P1 1 +#define RZV2H_P2 2 +#define RZV2H_P3 3 +#define RZV2H_P4 4 +#define RZV2H_P5 5 +#define RZV2H_P6 6 +#define RZV2H_P7 7 +#define RZV2H_P8 8 +#define RZV2H_P9 9 +#define RZV2H_PA 10 +#define RZV2H_PB 11 + +#define RZV2H_PORT_PINMUX(b, p, f) RZG2L_PORT_PINMUX(RZV2H_P##b, p, f) +#define RZV2H_GPIO(port, pin) RZG2L_GPIO(RZV2H_P##port, pin) + +#endif /* __DT_BINDINGS_PINCTRL_RENESAS_R9A09G057_PINCTRL_H__ */ diff --git a/include/dt-bindings/reset/amlogic,meson-a1-audio-reset.h b/include/dt-bindings/reset/amlogic,meson-a1-audio-reset.h new file mode 100644 index 000000000000..7693552f1507 --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson-a1-audio-reset.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (c) 2024, SaluteDevices. All Rights Reserved. + * + * Author: Jan Dakinevich <jan.dakinevich@salutedevices.com> + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON_A1_AUDIO_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON_A1_AUDIO_RESET_H + +#define AUD_RESET_DDRARB 0 +#define AUD_RESET_TDMIN_A 1 +#define AUD_RESET_TDMIN_B 2 +#define AUD_RESET_TDMIN_LB 3 +#define AUD_RESET_LOOPBACK 4 +#define AUD_RESET_TDMOUT_A 5 +#define AUD_RESET_TDMOUT_B 6 +#define AUD_RESET_FRDDR_A 7 +#define AUD_RESET_FRDDR_B 8 +#define AUD_RESET_TODDR_A 9 +#define AUD_RESET_TODDR_B 10 +#define AUD_RESET_SPDIFIN 11 +#define AUD_RESET_RESAMPLE 12 +#define AUD_RESET_EQDRC 13 +#define AUD_RESET_LOCKER 14 +#define AUD_RESET_TOACODEC 30 +#define AUD_RESET_CLKTREE 31 + +#define AUD_VAD_RESET_DDRARB 0 +#define AUD_VAD_RESET_PDM 1 +#define AUD_VAD_RESET_TDMIN_VAD 2 +#define AUD_VAD_RESET_TODDR_VAD 3 +#define AUD_VAD_RESET_TOVAD 4 +#define AUD_VAD_RESET_CLKTREE 5 + +#endif /* _DT_BINDINGS_AMLOGIC_MESON_A1_AUDIO_RESET_H */ diff --git a/include/dt-bindings/sound/qcom,wcd9335.h b/include/dt-bindings/sound/qcom,wcd9335.h index f5e9f1db091e..4fc68aeb9e04 100644 --- a/include/dt-bindings/sound/qcom,wcd9335.h +++ b/include/dt-bindings/sound/qcom,wcd9335.h @@ -10,6 +10,5 @@ #define AIF3_PB 4 #define AIF3_CAP 5 #define AIF4_PB 6 -#define NUM_CODEC_DAIS 7 #endif diff --git a/include/hyperv/hvgdk.h b/include/hyperv/hvgdk.h new file mode 100644 index 000000000000..dd6d4939ea29 --- /dev/null +++ b/include/hyperv/hvgdk.h @@ -0,0 +1,308 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Type definitions for the Microsoft Hypervisor. + */ +#ifndef _HV_HVGDK_H +#define _HV_HVGDK_H + +#include "hvgdk_mini.h" +#include "hvgdk_ext.h" + +/* + * The guest OS needs to register the guest ID with the hypervisor. + * The guest ID is a 64 bit entity and the structure of this ID is + * specified in the Hyper-V TLFS specification. + * + * While the current guideline does not specify how Linux guest ID(s) + * need to be generated, our plan is to publish the guidelines for + * Linux and other guest operating systems that currently are hosted + * on Hyper-V. The implementation here conforms to these yet + * unpublished guidelines.
+ * + * Bit(s) + * 63 - Indicates if the OS is Open Source or not; 1 is Open Source + * 62:56 - Os Type; Linux is 0x100 + * 55:48 - Distro specific identification + * 47:16 - Linux kernel version number + * 15:0 - Distro specific identification + */ + +#define HV_LINUX_VENDOR_ID 0x8100 + +/* HV_VMX_ENLIGHTENED_VMCS */ +struct hv_enlightened_vmcs { + u32 revision_id; + u32 abort; + + u16 host_es_selector; + u16 host_cs_selector; + u16 host_ss_selector; + u16 host_ds_selector; + u16 host_fs_selector; + u16 host_gs_selector; + u16 host_tr_selector; + + u16 padding16_1; + + u64 host_ia32_pat; + u64 host_ia32_efer; + + u64 host_cr0; + u64 host_cr3; + u64 host_cr4; + + u64 host_ia32_sysenter_esp; + u64 host_ia32_sysenter_eip; + u64 host_rip; + u32 host_ia32_sysenter_cs; + + u32 pin_based_vm_exec_control; + u32 vm_exit_controls; + u32 secondary_vm_exec_control; + + u64 io_bitmap_a; + u64 io_bitmap_b; + u64 msr_bitmap; + + u16 guest_es_selector; + u16 guest_cs_selector; + u16 guest_ss_selector; + u16 guest_ds_selector; + u16 guest_fs_selector; + u16 guest_gs_selector; + u16 guest_ldtr_selector; + u16 guest_tr_selector; + + u32 guest_es_limit; + u32 guest_cs_limit; + u32 guest_ss_limit; + u32 guest_ds_limit; + u32 guest_fs_limit; + u32 guest_gs_limit; + u32 guest_ldtr_limit; + u32 guest_tr_limit; + u32 guest_gdtr_limit; + u32 guest_idtr_limit; + + u32 guest_es_ar_bytes; + u32 guest_cs_ar_bytes; + u32 guest_ss_ar_bytes; + u32 guest_ds_ar_bytes; + u32 guest_fs_ar_bytes; + u32 guest_gs_ar_bytes; + u32 guest_ldtr_ar_bytes; + u32 guest_tr_ar_bytes; + + u64 guest_es_base; + u64 guest_cs_base; + u64 guest_ss_base; + u64 guest_ds_base; + u64 guest_fs_base; + u64 guest_gs_base; + u64 guest_ldtr_base; + u64 guest_tr_base; + u64 guest_gdtr_base; + u64 guest_idtr_base; + + u64 padding64_1[3]; + + u64 vm_exit_msr_store_addr; + u64 vm_exit_msr_load_addr; + u64 vm_entry_msr_load_addr; + + u64 cr3_target_value0; + u64 cr3_target_value1; + u64 cr3_target_value2; + u64 cr3_target_value3; + + u32 page_fault_error_code_mask; + u32 page_fault_error_code_match; + + u32 cr3_target_count; + u32 vm_exit_msr_store_count; + u32 vm_exit_msr_load_count; + u32 vm_entry_msr_load_count; + + u64 tsc_offset; + u64 virtual_apic_page_addr; + u64 vmcs_link_pointer; + + u64 guest_ia32_debugctl; + u64 guest_ia32_pat; + u64 guest_ia32_efer; + + u64 guest_pdptr0; + u64 guest_pdptr1; + u64 guest_pdptr2; + u64 guest_pdptr3; + + u64 guest_pending_dbg_exceptions; + u64 guest_sysenter_esp; + u64 guest_sysenter_eip; + + u32 guest_activity_state; + u32 guest_sysenter_cs; + + u64 cr0_guest_host_mask; + u64 cr4_guest_host_mask; + u64 cr0_read_shadow; + u64 cr4_read_shadow; + u64 guest_cr0; + u64 guest_cr3; + u64 guest_cr4; + u64 guest_dr7; + + u64 host_fs_base; + u64 host_gs_base; + u64 host_tr_base; + u64 host_gdtr_base; + u64 host_idtr_base; + u64 host_rsp; + + u64 ept_pointer; + + u16 virtual_processor_id; + u16 padding16_2[3]; + + u64 padding64_2[5]; + u64 guest_physical_address; + + u32 vm_instruction_error; + u32 vm_exit_reason; + u32 vm_exit_intr_info; + u32 vm_exit_intr_error_code; + u32 idt_vectoring_info_field; + u32 idt_vectoring_error_code; + u32 vm_exit_instruction_len; + u32 vmx_instruction_info; + + u64 exit_qualification; + u64 exit_io_instruction_ecx; + u64 exit_io_instruction_esi; + u64 exit_io_instruction_edi; + u64 exit_io_instruction_eip; + + u64 guest_linear_address; + u64 guest_rsp; + u64 guest_rflags; + + u32 guest_interruptibility_info; + u32 cpu_based_vm_exec_control; + u32 exception_bitmap; + u32 vm_entry_controls; + u32 
vm_entry_intr_info_field; + u32 vm_entry_exception_error_code; + u32 vm_entry_instruction_len; + u32 tpr_threshold; + + u64 guest_rip; + + u32 hv_clean_fields; + u32 padding32_1; + u32 hv_synthetic_controls; + struct { + u32 nested_flush_hypercall:1; + u32 msr_bitmap:1; + u32 reserved:30; + } __packed hv_enlightenments_control; + u32 hv_vp_id; + u32 padding32_2; + u64 hv_vm_id; + u64 partition_assist_page; + u64 padding64_4[4]; + u64 guest_bndcfgs; + u64 guest_ia32_perf_global_ctrl; + u64 guest_ia32_s_cet; + u64 guest_ssp; + u64 guest_ia32_int_ssp_table_addr; + u64 guest_ia32_lbr_ctl; + u64 padding64_5[2]; + u64 xss_exit_bitmap; + u64 encls_exiting_bitmap; + u64 host_ia32_perf_global_ctrl; + u64 tsc_multiplier; + u64 host_ia32_s_cet; + u64 host_ssp; + u64 host_ia32_int_ssp_table_addr; + u64 padding64_6; +} __packed; +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE 0 + + +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP BIT(0) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP BIT(1) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2 BIT(2) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1 BIT(3) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC BIT(4) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT BIT(5) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY BIT(6) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN BIT(7) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR BIT(8) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT BIT(9) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC BIT(10) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1 BIT(11) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2 BIT(12) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER BIT(13) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1 BIT(14) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL BIT(15) + +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF + +/* + * Note, Hyper-V isn't actually stealing bit 28 from Intel, just abusing it by + * pairing it with architecturally impossible exit reasons. Bit 28 is set only + * on SMI exits to a SMI transfer monitor (STM) and if and only if a MTF VM-Exit + * is pending. I.e. it will never be set by hardware for non-SMI exits (there + * are only three), nor will it ever be set unless the VMM is an STM. + */ +#define HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH 0x10000031 + +/* + * Hyper-V uses the software reserved 32 bytes in VMCB control area to expose + * SVM enlightenments to guests. This is documented in the TLFS doc. + * Note on naming: SVM_NESTED_ENLIGHTENED_VMCB_FIELDS + */ +struct hv_vmcb_enlightenments { + struct __packed hv_enlightenments_control { + u32 nested_flush_hypercall : 1; + u32 msr_bitmap : 1; + u32 enlightened_npt_tlb: 1; + u32 reserved : 29; + } __packed hv_enlightenments_control; + u32 hv_vp_id; + u64 hv_vm_id; + u64 partition_assist_page; + u64 reserved; +} __packed; + +/* + * Hyper-V uses the software reserved clean bit in VMCB. + */ +#define HV_VMCB_NESTED_ENLIGHTENMENTS 31 + +/* Synthetic VM-Exit */ +#define HV_SVM_EXITCODE_ENL 0xf0000000 +#define HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH (1) + +/* VM_PARTITION_ASSIST_PAGE */ +struct hv_partition_assist_pg { + u32 tlb_lock_count; +}; + +/* Define connection identifier type. 
*/ +union hv_connection_id { + u32 asu32; + struct { + u32 id : 24; + u32 reserved : 8; + } __packed u; +}; + +struct hv_input_unmap_gpa_pages { + u64 target_partition_id; + u64 target_gpa_base; + u32 unmap_flags; + u32 padding; +} __packed; + +#endif /* #ifndef _HV_HVGDK_H */ diff --git a/include/hyperv/hvgdk_ext.h b/include/hyperv/hvgdk_ext.h new file mode 100644 index 000000000000..641b591ee61f --- /dev/null +++ b/include/hyperv/hvgdk_ext.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Type definitions for the Microsoft Hypervisor. + */ +#ifndef _HV_HVGDK_EXT_H +#define _HV_HVGDK_EXT_H + +#include "hvgdk_mini.h" + +/* Extended hypercalls */ +#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001 +#define HV_EXT_CALL_MEMORY_HEAT_HINT 0x8003 + +/* Extended hypercalls */ +enum { /* HV_EXT_CALL */ + HV_EXTCALL_QUERY_CAPABILITIES = 0x8001, + HV_EXTCALL_MEMORY_HEAT_HINT = 0x8003, +}; + +/* HV_EXT_OUTPUT_QUERY_CAPABILITIES */ +#define HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT BIT(8) + +enum { /* HV_EXT_MEMORY_HEAT_HINT_TYPE */ + HV_EXTMEM_HEAT_HINT_COLD = 0, + HV_EXTMEM_HEAT_HINT_HOT = 1, + HV_EXTMEM_HEAT_HINT_COLD_DISCARD = 2, + HV_EXTMEM_HEAT_HINT_MAX +}; + +/* + * The whole argument should fit in a page to be able to pass to the hypervisor + * in one hypercall. + */ +#define HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES \ + ((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \ + sizeof(union hv_gpa_page_range)) + +/* HvExtCallMemoryHeatHint hypercall */ +#define HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD 2 +struct hv_memory_hint { /* HV_EXT_INPUT_MEMORY_HEAT_HINT */ + u64 heat_type : 2; /* HV_EXTMEM_HEAT_HINT_* */ + u64 reserved : 62; + union hv_gpa_page_range ranges[]; +} __packed; + +#endif /* _HV_HVGDK_EXT_H */ diff --git a/include/hyperv/hvgdk_mini.h b/include/hyperv/hvgdk_mini.h new file mode 100644 index 000000000000..155615175965 --- /dev/null +++ b/include/hyperv/hvgdk_mini.h @@ -0,0 +1,1348 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Type definitions for the Microsoft hypervisor. 
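HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES above exists because the whole hv_memory_hint argument must fit in one HV_HYP_PAGE_SIZE input buffer. A hedged sketch of advising the hypervisor that one run of 4K pages is cold and may be discarded; hv_do_rep_hypercall() is the kernel's generic rep-hypercall wrapper, 'hint' is assumed to be a zeroed page-sized hypercall input buffer, and npages must fit the 11-bit additional_pages field:

static u64 cold_discard_hint_sketch(struct hv_memory_hint *hint,
				    u64 start_pfn, unsigned int npages)
{
	hint->heat_type = HV_EXTMEM_HEAT_HINT_COLD_DISCARD;
	hint->reserved = 0;
	hint->ranges[0].address_space = 0;	/* zero the whole union first */
	hint->ranges[0].page.largepage = 0;
	hint->ranges[0].page.basepfn = start_pfn;
	hint->ranges[0].page.additional_pages = npages - 1;

	/* One rep per populated range. */
	return hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, 1, 0,
				   hint, NULL);
}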
+ */ +#ifndef _HV_HVGDK_MINI_H +#define _HV_HVGDK_MINI_H + +#include <linux/types.h> +#include <linux/bits.h> + +struct hv_u128 { + u64 low_part; + u64 high_part; +} __packed; + +/* NOTE: when adding below, update hv_status_to_string() */ +#define HV_STATUS_SUCCESS 0x0 +#define HV_STATUS_INVALID_HYPERCALL_CODE 0x2 +#define HV_STATUS_INVALID_HYPERCALL_INPUT 0x3 +#define HV_STATUS_INVALID_ALIGNMENT 0x4 +#define HV_STATUS_INVALID_PARAMETER 0x5 +#define HV_STATUS_ACCESS_DENIED 0x6 +#define HV_STATUS_INVALID_PARTITION_STATE 0x7 +#define HV_STATUS_OPERATION_DENIED 0x8 +#define HV_STATUS_UNKNOWN_PROPERTY 0x9 +#define HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE 0xA +#define HV_STATUS_INSUFFICIENT_MEMORY 0xB +#define HV_STATUS_INVALID_PARTITION_ID 0xD +#define HV_STATUS_INVALID_VP_INDEX 0xE +#define HV_STATUS_NOT_FOUND 0x10 +#define HV_STATUS_INVALID_PORT_ID 0x11 +#define HV_STATUS_INVALID_CONNECTION_ID 0x12 +#define HV_STATUS_INSUFFICIENT_BUFFERS 0x13 +#define HV_STATUS_NOT_ACKNOWLEDGED 0x14 +#define HV_STATUS_INVALID_VP_STATE 0x15 +#define HV_STATUS_NO_RESOURCES 0x1D +#define HV_STATUS_PROCESSOR_FEATURE_NOT_SUPPORTED 0x20 +#define HV_STATUS_INVALID_LP_INDEX 0x41 +#define HV_STATUS_INVALID_REGISTER_VALUE 0x50 +#define HV_STATUS_OPERATION_FAILED 0x71 +#define HV_STATUS_TIME_OUT 0x78 +#define HV_STATUS_CALL_PENDING 0x79 +#define HV_STATUS_VTL_ALREADY_ENABLED 0x86 + +/* + * The Hyper-V TimeRefCount register and the TSC + * page provide a guest VM clock with 100ns tick rate + */ +#define HV_CLOCK_HZ (NSEC_PER_SEC / 100) + +#define HV_HYP_PAGE_SHIFT 12 +#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT) +#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1)) + +#define HV_PARTITION_ID_INVALID ((u64)0) +#define HV_PARTITION_ID_SELF ((u64)-1) + +/* Hyper-V specific model specific registers (MSRs) */ + +#if defined(CONFIG_X86) +/* HV_X64_SYNTHETIC_MSR */ +#define HV_X64_MSR_GUEST_OS_ID 0x40000000 +#define HV_X64_MSR_HYPERCALL 0x40000001 +#define HV_X64_MSR_VP_INDEX 0x40000002 +#define HV_X64_MSR_RESET 0x40000003 +#define HV_X64_MSR_VP_RUNTIME 0x40000010 +#define HV_X64_MSR_TIME_REF_COUNT 0x40000020 +#define HV_X64_MSR_REFERENCE_TSC 0x40000021 +#define HV_X64_MSR_TSC_FREQUENCY 0x40000022 +#define HV_X64_MSR_APIC_FREQUENCY 0x40000023 + +/* Define the virtual APIC registers */ +#define HV_X64_MSR_EOI 0x40000070 +#define HV_X64_MSR_ICR 0x40000071 +#define HV_X64_MSR_TPR 0x40000072 +#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073 + +/* Define synthetic interrupt controller model specific registers. 
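Writing HV_X64_MSR_GUEST_OS_ID is the first step of Hyper-V guest initialization, using the bit layout documented at the top of hvgdk.h: the open-source flag and OS type sit in the top 16 bits (already combined in HV_LINUX_VENDOR_ID) and the kernel version in bits 47:16. A short sketch of the composition, mirroring what the kernel's hv_generate_guest_id() helper produces; the value is then written with wrmsrl() before the hypercall page is enabled:

#include <linux/version.h>

static u64 linux_guest_os_id_sketch(void)
{
	/* Bits 63:48 = open-source flag + OS type, bits 47:16 = version. */
	return ((u64)HV_LINUX_VENDOR_ID << 48) |
	       ((u64)LINUX_VERSION_CODE << 16);
}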
*/ +#define HV_X64_MSR_SCONTROL 0x40000080 +#define HV_X64_MSR_SVERSION 0x40000081 +#define HV_X64_MSR_SIEFP 0x40000082 +#define HV_X64_MSR_SIMP 0x40000083 +#define HV_X64_MSR_EOM 0x40000084 +#define HV_X64_MSR_SIRBP 0x40000085 +#define HV_X64_MSR_SINT0 0x40000090 +#define HV_X64_MSR_SINT1 0x40000091 +#define HV_X64_MSR_SINT2 0x40000092 +#define HV_X64_MSR_SINT3 0x40000093 +#define HV_X64_MSR_SINT4 0x40000094 +#define HV_X64_MSR_SINT5 0x40000095 +#define HV_X64_MSR_SINT6 0x40000096 +#define HV_X64_MSR_SINT7 0x40000097 +#define HV_X64_MSR_SINT8 0x40000098 +#define HV_X64_MSR_SINT9 0x40000099 +#define HV_X64_MSR_SINT10 0x4000009A +#define HV_X64_MSR_SINT11 0x4000009B +#define HV_X64_MSR_SINT12 0x4000009C +#define HV_X64_MSR_SINT13 0x4000009D +#define HV_X64_MSR_SINT14 0x4000009E +#define HV_X64_MSR_SINT15 0x4000009F + +/* Define synthetic interrupt controller model specific registers for nested hypervisor */ +#define HV_X64_MSR_NESTED_SCONTROL 0x40001080 +#define HV_X64_MSR_NESTED_SVERSION 0x40001081 +#define HV_X64_MSR_NESTED_SIEFP 0x40001082 +#define HV_X64_MSR_NESTED_SIMP 0x40001083 +#define HV_X64_MSR_NESTED_EOM 0x40001084 +#define HV_X64_MSR_NESTED_SINT0 0x40001090 + +/* + * Synthetic Timer MSRs. Four timers per vcpu. + */ +#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0 +#define HV_X64_MSR_STIMER0_COUNT 0x400000B1 +#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2 +#define HV_X64_MSR_STIMER1_COUNT 0x400000B3 +#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4 +#define HV_X64_MSR_STIMER2_COUNT 0x400000B5 +#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6 +#define HV_X64_MSR_STIMER3_COUNT 0x400000B7 + +/* Hyper-V guest idle MSR */ +#define HV_X64_MSR_GUEST_IDLE 0x400000F0 + +/* Hyper-V guest crash notification MSR's */ +#define HV_X64_MSR_CRASH_P0 0x40000100 +#define HV_X64_MSR_CRASH_P1 0x40000101 +#define HV_X64_MSR_CRASH_P2 0x40000102 +#define HV_X64_MSR_CRASH_P3 0x40000103 +#define HV_X64_MSR_CRASH_P4 0x40000104 +#define HV_X64_MSR_CRASH_CTL 0x40000105 + +#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001 +#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12 +#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \ + (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1)) + +#define HV_X64_MSR_CRASH_PARAMS \ + (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0)) + +#define HV_IPI_LOW_VECTOR 0x10 +#define HV_IPI_HIGH_VECTOR 0xff + +#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001 +#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12 +#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \ + (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1)) + +/* Hyper-V Enlightened VMCS version mask in nested features CPUID */ +#define HV_X64_ENLIGHTENED_VMCS_VERSION 0xff + +#define HV_X64_MSR_TSC_REFERENCE_ENABLE 0x00000001 +#define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12 + +/* Number of XMM registers used in hypercall input/output */ +#define HV_HYPERCALL_MAX_XMM_REGISTERS 6 + +struct hv_reenlightenment_control { + u64 vector : 8; + u64 reserved1 : 8; + u64 enabled : 1; + u64 reserved2 : 15; + u64 target_vp : 32; +} __packed; + +struct hv_tsc_emulation_status { /* HV_TSC_EMULATION_STATUS */ + u64 inprogress : 1; + u64 reserved : 63; +} __packed; + +struct hv_tsc_emulation_control { /* HV_TSC_INVARIANT_CONTROL */ + u64 enabled : 1; + u64 reserved : 63; +} __packed; + +/* TSC emulation after migration */ +#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 +#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107 +#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108 +#define HV_X64_MSR_TSC_INVARIANT_CONTROL 0x40000118 +#define 
HV_EXPOSE_INVARIANT_TSC BIT_ULL(0) + +#endif /* CONFIG_X86 */ + +struct hv_get_partition_id { /* HV_OUTPUT_GET_PARTITION_ID */ + u64 partition_id; +} __packed; + +/* HV_CRASH_CTL_REG_CONTENTS */ +#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62) +#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63) + +union hv_reference_tsc_msr { + u64 as_uint64; + struct { + u64 enable : 1; + u64 reserved : 11; + u64 pfn : 52; + } __packed; +}; + +/* The maximum number of sparse vCPU banks which can be encoded by 'struct hv_vpset' */ +#define HV_MAX_SPARSE_VCPU_BANKS (64) +/* The number of vCPUs in one sparse bank */ +#define HV_VCPUS_PER_SPARSE_BANK (64) + +/* Some of Hyper-V structs do not use hv_vpset where linux uses them */ +struct hv_vpset { /* HV_VP_SET */ + u64 format; + u64 valid_bank_mask; + u64 bank_contents[]; +} __packed; + +/* + * Version info reported by hypervisor + * Changed to a union for convenience + */ +union hv_hypervisor_version_info { + struct { + u32 build_number; + + u32 minor_version : 16; + u32 major_version : 16; + + u32 service_pack; + + u32 service_number : 24; + u32 service_branch : 8; + }; + struct { + u32 eax; + u32 ebx; + u32 ecx; + u32 edx; + }; +}; + +/* HV_CPUID_FUNCTION */ +#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000 +#define HYPERV_CPUID_INTERFACE 0x40000001 +#define HYPERV_CPUID_VERSION 0x40000002 +#define HYPERV_CPUID_FEATURES 0x40000003 +#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004 +#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005 +#define HYPERV_CPUID_CPU_MANAGEMENT_FEATURES 0x40000007 +#define HYPERV_CPUID_NESTED_FEATURES 0x4000000A +#define HYPERV_CPUID_ISOLATION_CONFIG 0x4000000C + +#define HYPERV_CPUID_VIRT_STACK_INTERFACE 0x40000081 +#define HYPERV_VS_INTERFACE_EAX_SIGNATURE 0x31235356 /* "VS#1" */ + +#define HYPERV_CPUID_VIRT_STACK_PROPERTIES 0x40000082 +/* Support for the extended IOAPIC RTE format */ +#define HYPERV_VS_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE BIT(2) + +#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000 +#define HYPERV_CPUID_MIN 0x40000005 +#define HYPERV_CPUID_MAX 0x4000ffff + +/* + * HV_X64_HYPERVISOR_FEATURES (EAX), or + * HV_PARTITION_PRIVILEGE_MASK [31-0] + */ +#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0) +#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1) +#define HV_MSR_SYNIC_AVAILABLE BIT(2) +#define HV_MSR_SYNTIMER_AVAILABLE BIT(3) +#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4) +#define HV_MSR_HYPERCALL_AVAILABLE BIT(5) +#define HV_MSR_VP_INDEX_AVAILABLE BIT(6) +#define HV_MSR_RESET_AVAILABLE BIT(7) +#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8) +#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9) +#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10) +#define HV_ACCESS_FREQUENCY_MSRS BIT(11) +#define HV_ACCESS_REENLIGHTENMENT BIT(13) +#define HV_ACCESS_TSC_INVARIANT BIT(15) + +/* + * HV_X64_HYPERVISOR_FEATURES (EBX), or + * HV_PARTITION_PRIVILEGE_MASK [63-32] + */ +#define HV_CREATE_PARTITIONS BIT(0) +#define HV_ACCESS_PARTITION_ID BIT(1) +#define HV_ACCESS_MEMORY_POOL BIT(2) +#define HV_ADJUST_MESSAGE_BUFFERS BIT(3) +#define HV_POST_MESSAGES BIT(4) +#define HV_SIGNAL_EVENTS BIT(5) +#define HV_CREATE_PORT BIT(6) +#define HV_CONNECT_PORT BIT(7) +#define HV_ACCESS_STATS BIT(8) +#define HV_DEBUGGING BIT(11) +#define HV_CPU_MANAGEMENT BIT(12) +#define HV_ENABLE_EXTENDED_HYPERCALLS BIT(20) +#define HV_ISOLATION BIT(22) + +#if defined(CONFIG_X86) +/* HV_X64_HYPERVISOR_FEATURES (EDX) */ +#define HV_X64_MWAIT_AVAILABLE BIT(0) +#define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1) +#define HV_X64_PERF_MONITOR_AVAILABLE BIT(2) +#define 
HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3) +#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE BIT(4) +#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5) +#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8) +#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10) +#define HV_FEATURE_DEBUG_MSRS_AVAILABLE BIT(11) +#define HV_FEATURE_EXT_GVA_RANGES_FLUSH BIT(14) +/* + * Support for returning hypercall output block via XMM + * registers is available + */ +#define HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE BIT(15) +/* stimer Direct Mode is available */ +#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19) + +/* + * Implementation recommendations. Indicates which behaviors the hypervisor + * recommends the OS implement for optimal performance. + * These are HYPERV_CPUID_ENLIGHTMENT_INFO.EAX bits. + */ +/* HV_X64_ENLIGHTENMENT_INFORMATION */ +#define HV_X64_AS_SWITCH_RECOMMENDED BIT(0) +#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED BIT(1) +#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED BIT(2) +#define HV_X64_APIC_ACCESS_RECOMMENDED BIT(3) +#define HV_X64_SYSTEM_RESET_RECOMMENDED BIT(4) +#define HV_X64_RELAXED_TIMING_RECOMMENDED BIT(5) +#define HV_DEPRECATING_AEOI_RECOMMENDED BIT(9) +#define HV_X64_CLUSTER_IPI_RECOMMENDED BIT(10) +#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11) +#define HV_X64_HYPERV_NESTED BIT(12) +#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14) +#define HV_X64_USE_MMIO_HYPERCALLS BIT(21) + +/* + * CPU management features identification. + * These are HYPERV_CPUID_CPU_MANAGEMENT_FEATURES.EAX bits. + */ +#define HV_X64_START_LOGICAL_PROCESSOR BIT(0) +#define HV_X64_CREATE_ROOT_VIRTUAL_PROCESSOR BIT(1) +#define HV_X64_PERFORMANCE_COUNTER_SYNC BIT(2) +#define HV_X64_RESERVED_IDENTITY_BIT BIT(31) + +/* + * Virtual processor will never share a physical core with another virtual + * processor, except for virtual processors that are reported as sibling SMT + * threads. + */ +#define HV_X64_NO_NONARCH_CORESHARING BIT(18) + +/* Nested features. These are HYPERV_CPUID_NESTED_FEATURES.EAX bits. */ +#define HV_X64_NESTED_DIRECT_FLUSH BIT(17) +#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18) +#define HV_X64_NESTED_MSR_BITMAP BIT(19) + +/* Nested features #2. These are HYPERV_CPUID_NESTED_FEATURES.EBX bits. */ +#define HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL BIT(0) + +/* + * This is specific to AMD and specifies that enlightened TLB flush is + * supported. If guest opts in to this feature, ASID invalidations only + * flushes gva -> hpa mapping entries. To flush the TLB entries derived + * from NPT, hypercalls should be used (HvFlushGuestPhysicalAddressSpace + * or HvFlushGuestPhysicalAddressList). + */ +#define HV_X64_NESTED_ENLIGHTENED_TLB BIT(22) + +/* HYPERV_CPUID_ISOLATION_CONFIG.EAX bits. */ +#define HV_PARAVISOR_PRESENT BIT(0) + +/* HYPERV_CPUID_ISOLATION_CONFIG.EBX bits. 
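All of the feature, recommendation, and nested-feature words above are consumed by probing the 0x4000xxxx CPUID leaves once hypervisor presence is confirmed; each BIT() is then tested against the register named in its section comment. A minimal probe sketch using the kernel's x86 cpuid helpers (the function name is illustrative):

#include <asm/processor.h>

static void hv_probe_features_sketch(void)
{
	u32 features = cpuid_eax(HYPERV_CPUID_FEATURES);
	u32 hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);

	if (features & HV_MSR_REFERENCE_TSC_AVAILABLE) {
		/* safe to map and use the reference TSC page */
	}
	if (hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED) {
		/* prefer HvFlushVirtualAddress* hypercalls over IPIs */
	}
}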
*/ +#define HV_ISOLATION_TYPE GENMASK(3, 0) +#define HV_SHARED_GPA_BOUNDARY_ACTIVE BIT(5) +#define HV_SHARED_GPA_BOUNDARY_BITS GENMASK(11, 6) + +enum hv_isolation_type { + HV_ISOLATION_TYPE_NONE = 0, /* HV_PARTITION_ISOLATION_TYPE_NONE */ + HV_ISOLATION_TYPE_VBS = 1, + HV_ISOLATION_TYPE_SNP = 2, + HV_ISOLATION_TYPE_TDX = 3 +}; + +union hv_x64_msr_hypercall_contents { + u64 as_uint64; + struct { + u64 enable : 1; + u64 reserved : 11; + u64 guest_physical_address : 52; + } __packed; +}; +#endif /* CONFIG_X86 */ + +#if defined(CONFIG_ARM64) +#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(8) +#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(13) +#endif /* CONFIG_ARM64 */ + +#if defined(CONFIG_X86) +#define HV_MAXIMUM_PROCESSORS 2048 +#elif defined(CONFIG_ARM64) /* CONFIG_X86 */ +#define HV_MAXIMUM_PROCESSORS 320 +#endif /* CONFIG_ARM64 */ + +#define HV_MAX_VP_INDEX (HV_MAXIMUM_PROCESSORS - 1) +#define HV_VP_INDEX_SELF ((u32)-2) +#define HV_ANY_VP ((u32)-1) + +union hv_vp_assist_msr_contents { /* HV_REGISTER_VP_ASSIST_PAGE */ + u64 as_uint64; + struct { + u64 enable : 1; + u64 reserved : 11; + u64 pfn : 52; + } __packed; +}; + +/* Declare the various hypercall operations. */ +/* HV_CALL_CODE */ +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003 +#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008 +#define HVCALL_SEND_IPI 0x000b +#define HVCALL_ENABLE_VP_VTL 0x000f +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014 +#define HVCALL_SEND_IPI_EX 0x0015 +#define HVCALL_CREATE_PARTITION 0x0040 +#define HVCALL_INITIALIZE_PARTITION 0x0041 +#define HVCALL_FINALIZE_PARTITION 0x0042 +#define HVCALL_DELETE_PARTITION 0x0043 +#define HVCALL_GET_PARTITION_PROPERTY 0x0044 +#define HVCALL_SET_PARTITION_PROPERTY 0x0045 +#define HVCALL_GET_PARTITION_ID 0x0046 +#define HVCALL_DEPOSIT_MEMORY 0x0048 +#define HVCALL_WITHDRAW_MEMORY 0x0049 +#define HVCALL_MAP_GPA_PAGES 0x004b +#define HVCALL_UNMAP_GPA_PAGES 0x004c +#define HVCALL_CREATE_VP 0x004e +#define HVCALL_DELETE_VP 0x004f +#define HVCALL_GET_VP_REGISTERS 0x0050 +#define HVCALL_SET_VP_REGISTERS 0x0051 +#define HVCALL_DELETE_PORT 0x0058 +#define HVCALL_DISCONNECT_PORT 0x005b +#define HVCALL_POST_MESSAGE 0x005c +#define HVCALL_SIGNAL_EVENT 0x005d +#define HVCALL_POST_DEBUG_DATA 0x0069 +#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a +#define HVCALL_RESET_DEBUG_SESSION 0x006b +#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076 +#define HVCALL_GET_SYSTEM_PROPERTY 0x007b +#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c +#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d +#define HVCALL_RETARGET_INTERRUPT 0x007e +#define HVCALL_NOTIFY_PORT_RING_EMPTY 0x008b +#define HVCALL_ASSERT_VIRTUAL_INTERRUPT 0x0094 +#define HVCALL_CREATE_PORT 0x0095 +#define HVCALL_CONNECT_PORT 0x0096 +#define HVCALL_START_VP 0x0099 +#define HVCALL_GET_VP_ID_FROM_APIC_ID 0x009a +#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af +#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0 +#define HVCALL_DISPATCH_VP 0x00c2 +#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db +#define HVCALL_MAP_VP_STATE_PAGE 0x00e1 +#define HVCALL_UNMAP_VP_STATE_PAGE 0x00e2 +#define HVCALL_GET_VP_STATE 0x00e3 +#define HVCALL_SET_VP_STATE 0x00e4 +#define HVCALL_MMIO_READ 0x0106 +#define HVCALL_MMIO_WRITE 0x0107 + +/* HV_HYPERCALL_INPUT */ +#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0) +#define HV_HYPERCALL_FAST_BIT BIT(16) +#define HV_HYPERCALL_VARHEAD_OFFSET 17 +#define HV_HYPERCALL_VARHEAD_MASK GENMASK_ULL(26, 17) +#define 
HV_HYPERCALL_RSVD0_MASK GENMASK_ULL(31, 27) +#define HV_HYPERCALL_NESTED BIT_ULL(31) +#define HV_HYPERCALL_REP_COMP_OFFSET 32 +#define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32) +#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32) +#define HV_HYPERCALL_RSVD1_MASK GENMASK_ULL(47, 44) +#define HV_HYPERCALL_REP_START_OFFSET 48 +#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48) +#define HV_HYPERCALL_RSVD2_MASK GENMASK_ULL(63, 60) +#define HV_HYPERCALL_RSVD_MASK (HV_HYPERCALL_RSVD0_MASK | \ + HV_HYPERCALL_RSVD1_MASK | \ + HV_HYPERCALL_RSVD2_MASK) + +/* HvFlushGuestPhysicalAddressSpace hypercalls */ +struct hv_guest_mapping_flush { + u64 address_space; + u64 flags; +} __packed; + +/* + * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited + * by the bitwidth of "additional_pages" in union hv_gpa_page_range. + */ +#define HV_MAX_FLUSH_PAGES (2048) +#define HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB 0 +#define HV_GPA_PAGE_RANGE_PAGE_SIZE_1GB 1 + +#define HV_FLUSH_ALL_PROCESSORS BIT(0) +#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1) +#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2) +#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3) + +/* HvFlushGuestPhysicalAddressList, HvExtCallMemoryHeatHint hypercall */ +union hv_gpa_page_range { + u64 address_space; + struct { + u64 additional_pages : 11; + u64 largepage : 1; + u64 basepfn : 52; + } page; + struct { + u64 reserved : 12; + u64 page_size : 1; + u64 reserved1 : 8; + u64 base_large_pfn : 43; + }; +}; + +/* + * All input flush parameters should be in single page. The max flush + * count is equal with how many entries of union hv_gpa_page_range can + * be populated into the input parameter page. + */ +#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \ + sizeof(union hv_gpa_page_range)) + +struct hv_guest_mapping_flush_list { + u64 address_space; + u64 flags; + union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT]; +}; + +struct hv_tlb_flush { /* HV_INPUT_FLUSH_VIRTUAL_ADDRESS_LIST */ + u64 address_space; + u64 flags; + u64 processor_mask; + u64 gva_list[]; +} __packed; + +/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */ +struct hv_tlb_flush_ex { + u64 address_space; + u64 flags; + struct hv_vpset hv_vp_set; + u64 gva_list[]; +} __packed; + +struct ms_hyperv_tsc_page { /* HV_REFERENCE_TSC_PAGE */ + volatile u32 tsc_sequence; + u32 reserved1; + volatile u64 tsc_scale; + volatile s64 tsc_offset; +} __packed; + +/* Define the number of synthetic interrupt sources. */ +#define HV_SYNIC_SINT_COUNT (16) + +/* Define the expected SynIC version. */ +#define HV_SYNIC_VERSION_1 (0x1) +/* Valid SynIC vectors are 16-255. 
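The HV_HYPERCALL_* fields above make up the 64-bit hypercall input control word: call code in bits 15:0, rep count in 43:32, and the resuming rep-start index in 59:48. For rep hypercalls the hypervisor may return early with only some reps completed, so the caller re-issues the call with rep-start advanced to the returned completion count until all reps are done. A sketch of that loop, modeled on the kernel's hv_do_rep_hypercall() wrapper (hv_do_hypercall() is the raw primitive):

static u64 rep_hypercall_sketch(u16 code, u16 rep_count, void *in, void *out)
{
	u64 control = (u64)code |
		      ((u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET);
	u64 status;
	u16 rep_comp;

	do {
		status = hv_do_hypercall(control, in, out);
		if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
			break;

		/* Bits 43:32 of the status report 'reps completed'. */
		rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >>
			   HV_HYPERCALL_REP_COMP_OFFSET;

		/* Resume the next attempt where this one stopped. */
		control &= ~HV_HYPERCALL_REP_START_MASK;
		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;
	} while (rep_comp < rep_count);

	return status;
}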
 */
+#define HV_SYNIC_FIRST_VALID_VECTOR (16)
+
+#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIMP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SINT_MASKED (1ULL << 16)
+#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17)
+#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
+
+/* Hyper-V defined statically assigned SINTs */
+#define HV_SYNIC_INTERCEPTION_SINT_INDEX 0x00000000
+#define HV_SYNIC_IOMMU_FAULT_SINT_INDEX 0x00000001
+#define HV_SYNIC_VMBUS_SINT_INDEX 0x00000002
+#define HV_SYNIC_FIRST_UNUSED_SINT_INDEX 0x00000005
+
+/* mshv assigned SINT for doorbell */
+#define HV_SYNIC_DOORBELL_SINT_INDEX HV_SYNIC_FIRST_UNUSED_SINT_INDEX
+
+enum hv_interrupt_type {
+ HV_X64_INTERRUPT_TYPE_FIXED = 0x0000,
+ HV_X64_INTERRUPT_TYPE_LOWESTPRIORITY = 0x0001,
+ HV_X64_INTERRUPT_TYPE_SMI = 0x0002,
+ HV_X64_INTERRUPT_TYPE_REMOTEREAD = 0x0003,
+ HV_X64_INTERRUPT_TYPE_NMI = 0x0004,
+ HV_X64_INTERRUPT_TYPE_INIT = 0x0005,
+ HV_X64_INTERRUPT_TYPE_SIPI = 0x0006,
+ HV_X64_INTERRUPT_TYPE_EXTINT = 0x0007,
+ HV_X64_INTERRUPT_TYPE_LOCALINT0 = 0x0008,
+ HV_X64_INTERRUPT_TYPE_LOCALINT1 = 0x0009,
+ HV_X64_INTERRUPT_TYPE_MAXIMUM = 0x000A,
+};
+
+/* Define synthetic interrupt source. */
+union hv_synic_sint {
+ u64 as_uint64;
+ struct {
+ u64 vector : 8;
+ u64 reserved1 : 8;
+ u64 masked : 1;
+ u64 auto_eoi : 1;
+ u64 polling : 1;
+ u64 as_intercept : 1;
+ u64 proxy : 1;
+ u64 reserved2 : 43;
+ } __packed;
+};
+
+union hv_x64_xsave_xfem_register {
+ u64 as_uint64;
+ struct {
+ u32 low_uint32;
+ u32 high_uint32;
+ } __packed;
+ struct {
+ u64 legacy_x87 : 1;
+ u64 legacy_sse : 1;
+ u64 avx : 1;
+ u64 mpx_bndreg : 1;
+ u64 mpx_bndcsr : 1;
+ u64 avx_512_op_mask : 1;
+ u64 avx_512_zmmhi : 1;
+ u64 avx_512_zmm16_31 : 1;
+ u64 rsvd8_9 : 2;
+ u64 pasid : 1;
+ u64 cet_u : 1;
+ u64 cet_s : 1;
+ u64 rsvd13_16 : 4;
+ u64 xtile_cfg : 1;
+ u64 xtile_data : 1;
+ u64 rsvd19_63 : 45;
+ } __packed;
+};
+
+/* Synthetic timer configuration */
+union hv_stimer_config { /* HV_X64_MSR_STIMER_CONFIG_CONTENTS */
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 periodic : 1;
+ u64 lazy : 1;
+ u64 auto_enable : 1;
+ u64 apic_vector : 8;
+ u64 direct_mode : 1;
+ u64 reserved_z0 : 3;
+ u64 sintx : 4;
+ u64 reserved_z1 : 44;
+ } __packed;
+};
+
+/* Define the number of synthetic timers */
+#define HV_SYNIC_STIMER_COUNT (4)
+
+/* Define port identifier type. */
+union hv_port_id {
+ u32 asu32;
+ struct {
+ u32 id : 24;
+ u32 reserved : 8;
+ } __packed u;
+};
+
+#define HV_MESSAGE_SIZE (256)
+#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
+#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
+
+/* Define hypervisor message types. */
+enum hv_message_type {
+ HVMSG_NONE = 0x00000000,
+
+ /* Memory access messages. */
+ HVMSG_UNMAPPED_GPA = 0x80000000,
+ HVMSG_GPA_INTERCEPT = 0x80000001,
+
+ /* Timer notification messages. */
+ HVMSG_TIMER_EXPIRED = 0x80000010,
+
+ /* Error messages. */
+ HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
+ HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
+ HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
+
+ /*
+ * Opaque intercept message. The original intercept message is only
+ * accessible from the mapped intercept message page.
+ */
+ HVMSG_OPAQUE_INTERCEPT = 0x8000003F,
+
+ /* Trace buffer complete messages.
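hv_stimer_config above carries everything needed to arm one of the HV_SYNIC_STIMER_COUNT per-VP timers: with direct_mode set the expiry arrives as the configured APIC vector instead of a SynIC message on sintx, and all counts are in the 100ns units fixed by HV_CLOCK_HZ. A sketch of arming STIMER0 one-shot in direct mode; hv_get_msr()/hv_set_msr() stand in for the arch register accessors, and the HV_MSR_* aliases are defined later in this header:

static void stimer0_arm_sketch(u8 vector, u64 ns_from_now)
{
	union hv_stimer_config cfg = { .as_uint64 = 0 };
	u64 now = hv_get_msr(HV_MSR_TIME_REF_COUNT);

	cfg.enable = 1;
	cfg.auto_enable = 1;	/* writing COUNT (re)arms the timer */
	cfg.direct_mode = 1;
	cfg.apic_vector = vector;

	hv_set_msr(HV_MSR_STIMER0_CONFIG, cfg.as_uint64);
	hv_set_msr(HV_MSR_STIMER0_COUNT, now + ns_from_now / 100);
}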
*/ + HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040, + + /* Hypercall intercept */ + HVMSG_HYPERCALL_INTERCEPT = 0x80000050, + + /* SynIC intercepts */ + HVMSG_SYNIC_EVENT_INTERCEPT = 0x80000060, + HVMSG_SYNIC_SINT_INTERCEPT = 0x80000061, + HVMSG_SYNIC_SINT_DELIVERABLE = 0x80000062, + + /* Async call completion intercept */ + HVMSG_ASYNC_CALL_COMPLETION = 0x80000070, + + /* Root scheduler messages */ + HVMSG_SCHEDULER_VP_SIGNAL_BITSET = 0x80000100, + HVMSG_SCHEDULER_VP_SIGNAL_PAIR = 0x80000101, + + /* Platform-specific processor intercept messages. */ + HVMSG_X64_IO_PORT_INTERCEPT = 0x80010000, + HVMSG_X64_MSR_INTERCEPT = 0x80010001, + HVMSG_X64_CPUID_INTERCEPT = 0x80010002, + HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003, + HVMSG_X64_APIC_EOI = 0x80010004, + HVMSG_X64_LEGACY_FP_ERROR = 0x80010005, + HVMSG_X64_IOMMU_PRQ = 0x80010006, + HVMSG_X64_HALT = 0x80010007, + HVMSG_X64_INTERRUPTION_DELIVERABLE = 0x80010008, + HVMSG_X64_SIPI_INTERCEPT = 0x80010009, +}; + +/* Define the format of the SIMP register */ +union hv_synic_simp { + u64 as_uint64; + struct { + u64 simp_enabled : 1; + u64 preserved : 11; + u64 base_simp_gpa : 52; + } __packed; +}; + +union hv_message_flags { + u8 asu8; + struct { + u8 msg_pending : 1; + u8 reserved : 7; + } __packed; +}; + +struct hv_message_header { + u32 message_type; + u8 payload_size; + union hv_message_flags message_flags; + u8 reserved[2]; + union { + u64 sender; + union hv_port_id port; + }; +} __packed; + +/* + * Message format for notifications delivered via + * intercept message(as_intercept=1) + */ +struct hv_notification_message_payload { + u32 sint_index; +} __packed; + +struct hv_message { + struct hv_message_header header; + union { + u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; + } u; +} __packed; + +/* Define the synthetic interrupt message page layout. */ +struct hv_message_page { + struct hv_message sint_message[HV_SYNIC_SINT_COUNT]; +} __packed; + +/* Define timer message payload structure. 
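The message page layout above implies a consumption handshake: the guest processes a slot, writes HVMSG_NONE back, and only then, if the hypervisor had set msg_pending, writes the end-of-message register so delivery of the queued message is retried. A hedged sketch for one SINT slot (msg_page is the page mapped through the SIMP register; hv_set_msr() is an assumed accessor for HV_MSR_EOM):

static void drain_sint_sketch(struct hv_message_page *msg_page, int sint)
{
	struct hv_message *msg = &msg_page->sint_message[sint];

	while (READ_ONCE(msg->header.message_type) != HVMSG_NONE) {
		/* ... dispatch on msg->header.message_type here ... */

		WRITE_ONCE(msg->header.message_type, HVMSG_NONE);
		/* Publish the free slot before checking msg_pending. */
		mb();
		if (msg->header.message_flags.msg_pending)
			hv_set_msr(HV_MSR_EOM, 0);
	}
}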
*/ +struct hv_timer_message_payload { + __u32 timer_index; + __u32 reserved; + __u64 expiration_time; /* When the timer expired */ + __u64 delivery_time; /* When the message was delivered */ +} __packed; + +struct hv_x64_segment_register { + u64 base; + u32 limit; + u16 selector; + union { + struct { + u16 segment_type : 4; + u16 non_system_segment : 1; + u16 descriptor_privilege_level : 2; + u16 present : 1; + u16 reserved : 4; + u16 available : 1; + u16 _long : 1; + u16 _default : 1; + u16 granularity : 1; + } __packed; + u16 attributes; + }; +} __packed; + +struct hv_x64_table_register { + u16 pad[3]; + u16 limit; + u64 base; +} __packed; + +union hv_input_vtl { + u8 as_uint8; + struct { + u8 target_vtl : 4; + u8 use_target_vtl : 1; + u8 reserved_z : 3; + }; +} __packed; + +struct hv_init_vp_context { + u64 rip; + u64 rsp; + u64 rflags; + + struct hv_x64_segment_register cs; + struct hv_x64_segment_register ds; + struct hv_x64_segment_register es; + struct hv_x64_segment_register fs; + struct hv_x64_segment_register gs; + struct hv_x64_segment_register ss; + struct hv_x64_segment_register tr; + struct hv_x64_segment_register ldtr; + + struct hv_x64_table_register idtr; + struct hv_x64_table_register gdtr; + + u64 efer; + u64 cr0; + u64 cr3; + u64 cr4; + u64 msr_cr_pat; +} __packed; + +struct hv_enable_vp_vtl { + u64 partition_id; + u32 vp_index; + union hv_input_vtl target_vtl; + u8 mbz0; + u16 mbz1; + struct hv_init_vp_context vp_context; +} __packed; + +struct hv_get_vp_from_apic_id_in { + u64 partition_id; + union hv_input_vtl target_vtl; + u8 res[7]; + u32 apic_ids[]; +} __packed; + +struct hv_nested_enlightenments_control { + struct { + u32 directhypercall : 1; + u32 reserved : 31; + } __packed features; + struct { + u32 inter_partition_comm : 1; + u32 reserved : 31; + } __packed hypercall_controls; +} __packed; + +/* Define virtual processor assist page structure. 
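hv_enable_vp_vtl above bundles a complete hv_init_vp_context, so enabling a higher VTL on a VP means describing the full entry state in one hypercall. A heavily hedged sketch in the spirit of the kernel's VTL bring-up code; real code must also populate the segment registers and GDT/IDT, and the entry values here are placeholders:

#include <linux/string.h>

static u64 enable_vp_vtl_sketch(struct hv_enable_vp_vtl *input, u32 vp_index,
				u8 vtl, u64 rip, u64 rsp, u64 cr3)
{
	memset(input, 0, sizeof(*input));
	input->partition_id = HV_PARTITION_ID_SELF;
	input->vp_index = vp_index;
	input->target_vtl.target_vtl = vtl;

	input->vp_context.rip = rip;
	input->vp_context.rsp = rsp;
	input->vp_context.rflags = 0x0002;	/* only the always-set bit */
	input->vp_context.cr3 = cr3;

	return hv_do_hypercall(HVCALL_ENABLE_VP_VTL, input, NULL);
}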
*/ +struct hv_vp_assist_page { + u32 apic_assist; + u32 reserved1; + u32 vtl_entry_reason; + u32 vtl_reserved; + u64 vtl_ret_x64rax; + u64 vtl_ret_x64rcx; + struct hv_nested_enlightenments_control nested_control; + u8 enlighten_vmentry; + u8 reserved2[7]; + u64 current_nested_vmcs; + u8 synthetic_time_unhalted_timer_expired; + u8 reserved3[7]; + u8 virtualization_fault_information[40]; + u8 reserved4[8]; + u8 intercept_message[256]; + u8 vtl_ret_actions[256]; +} __packed; + +enum hv_register_name { + /* Suspend Registers */ + HV_REGISTER_EXPLICIT_SUSPEND = 0x00000000, + HV_REGISTER_INTERCEPT_SUSPEND = 0x00000001, + HV_REGISTER_DISPATCH_SUSPEND = 0x00000003, + + /* Version - 128-bit result same as CPUID 0x40000002 */ + HV_REGISTER_HYPERVISOR_VERSION = 0x00000100, + + /* Feature Access (registers are 128 bits) - same as CPUID 0x40000003 - 0x4000000B */ + HV_REGISTER_PRIVILEGES_AND_FEATURES_INFO = 0x00000200, + HV_REGISTER_FEATURES_INFO = 0x00000201, + HV_REGISTER_IMPLEMENTATION_LIMITS_INFO = 0x00000202, + HV_REGISTER_HARDWARE_FEATURES_INFO = 0x00000203, + HV_REGISTER_CPU_MANAGEMENT_FEATURES_INFO = 0x00000204, + HV_REGISTER_SVM_FEATURES_INFO = 0x00000205, + HV_REGISTER_SKIP_LEVEL_FEATURES_INFO = 0x00000206, + HV_REGISTER_NESTED_VIRT_FEATURES_INFO = 0x00000207, + HV_REGISTER_IPT_FEATURES_INFO = 0x00000208, + + /* Guest Crash Registers */ + HV_REGISTER_GUEST_CRASH_P0 = 0x00000210, + HV_REGISTER_GUEST_CRASH_P1 = 0x00000211, + HV_REGISTER_GUEST_CRASH_P2 = 0x00000212, + HV_REGISTER_GUEST_CRASH_P3 = 0x00000213, + HV_REGISTER_GUEST_CRASH_P4 = 0x00000214, + HV_REGISTER_GUEST_CRASH_CTL = 0x00000215, + + /* Misc */ + HV_REGISTER_VP_RUNTIME = 0x00090000, + HV_REGISTER_GUEST_OS_ID = 0x00090002, + HV_REGISTER_VP_INDEX = 0x00090003, + HV_REGISTER_TIME_REF_COUNT = 0x00090004, + HV_REGISTER_CPU_MANAGEMENT_VERSION = 0x00090007, + HV_REGISTER_VP_ASSIST_PAGE = 0x00090013, + HV_REGISTER_VP_ROOT_SIGNAL_COUNT = 0x00090014, + HV_REGISTER_REFERENCE_TSC = 0x00090017, + + /* Hypervisor-defined Registers (Synic) */ + HV_REGISTER_SINT0 = 0x000A0000, + HV_REGISTER_SINT1 = 0x000A0001, + HV_REGISTER_SINT2 = 0x000A0002, + HV_REGISTER_SINT3 = 0x000A0003, + HV_REGISTER_SINT4 = 0x000A0004, + HV_REGISTER_SINT5 = 0x000A0005, + HV_REGISTER_SINT6 = 0x000A0006, + HV_REGISTER_SINT7 = 0x000A0007, + HV_REGISTER_SINT8 = 0x000A0008, + HV_REGISTER_SINT9 = 0x000A0009, + HV_REGISTER_SINT10 = 0x000A000A, + HV_REGISTER_SINT11 = 0x000A000B, + HV_REGISTER_SINT12 = 0x000A000C, + HV_REGISTER_SINT13 = 0x000A000D, + HV_REGISTER_SINT14 = 0x000A000E, + HV_REGISTER_SINT15 = 0x000A000F, + HV_REGISTER_SCONTROL = 0x000A0010, + HV_REGISTER_SVERSION = 0x000A0011, + HV_REGISTER_SIEFP = 0x000A0012, + HV_REGISTER_SIMP = 0x000A0013, + HV_REGISTER_EOM = 0x000A0014, + HV_REGISTER_SIRBP = 0x000A0015, + + HV_REGISTER_NESTED_SINT0 = 0x000A1000, + HV_REGISTER_NESTED_SINT1 = 0x000A1001, + HV_REGISTER_NESTED_SINT2 = 0x000A1002, + HV_REGISTER_NESTED_SINT3 = 0x000A1003, + HV_REGISTER_NESTED_SINT4 = 0x000A1004, + HV_REGISTER_NESTED_SINT5 = 0x000A1005, + HV_REGISTER_NESTED_SINT6 = 0x000A1006, + HV_REGISTER_NESTED_SINT7 = 0x000A1007, + HV_REGISTER_NESTED_SINT8 = 0x000A1008, + HV_REGISTER_NESTED_SINT9 = 0x000A1009, + HV_REGISTER_NESTED_SINT10 = 0x000A100A, + HV_REGISTER_NESTED_SINT11 = 0x000A100B, + HV_REGISTER_NESTED_SINT12 = 0x000A100C, + HV_REGISTER_NESTED_SINT13 = 0x000A100D, + HV_REGISTER_NESTED_SINT14 = 0x000A100E, + HV_REGISTER_NESTED_SINT15 = 0x000A100F, + HV_REGISTER_NESTED_SCONTROL = 0x000A1010, + HV_REGISTER_NESTED_SVERSION = 0x000A1011, + 
HV_REGISTER_NESTED_SIFP = 0x000A1012, + HV_REGISTER_NESTED_SIPP = 0x000A1013, + HV_REGISTER_NESTED_EOM = 0x000A1014, + HV_REGISTER_NESTED_SIRBP = 0x000a1015, + + /* Hypervisor-defined Registers (Synthetic Timers) */ + HV_REGISTER_STIMER0_CONFIG = 0x000B0000, + HV_REGISTER_STIMER0_COUNT = 0x000B0001, + + /* VSM */ + HV_REGISTER_VSM_VP_STATUS = 0x000D0003, +}; + +/* + * Arch compatibility regs for use with hv_set/get_register + */ +#if defined(CONFIG_X86) + +/* + * To support arch-generic code calling hv_set/get_register: + * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrl/wrmsrl + * - On ARM, HV_MSR_ indicates a VP register accessed via hypercall + */ +#define HV_MSR_CRASH_P0 (HV_X64_MSR_CRASH_P0) +#define HV_MSR_CRASH_P1 (HV_X64_MSR_CRASH_P1) +#define HV_MSR_CRASH_P2 (HV_X64_MSR_CRASH_P2) +#define HV_MSR_CRASH_P3 (HV_X64_MSR_CRASH_P3) +#define HV_MSR_CRASH_P4 (HV_X64_MSR_CRASH_P4) +#define HV_MSR_CRASH_CTL (HV_X64_MSR_CRASH_CTL) + +#define HV_MSR_VP_INDEX (HV_X64_MSR_VP_INDEX) +#define HV_MSR_TIME_REF_COUNT (HV_X64_MSR_TIME_REF_COUNT) +#define HV_MSR_REFERENCE_TSC (HV_X64_MSR_REFERENCE_TSC) + +#define HV_MSR_SINT0 (HV_X64_MSR_SINT0) +#define HV_MSR_SVERSION (HV_X64_MSR_SVERSION) +#define HV_MSR_SCONTROL (HV_X64_MSR_SCONTROL) +#define HV_MSR_SIEFP (HV_X64_MSR_SIEFP) +#define HV_MSR_SIMP (HV_X64_MSR_SIMP) +#define HV_MSR_EOM (HV_X64_MSR_EOM) +#define HV_MSR_SIRBP (HV_X64_MSR_SIRBP) + +#define HV_MSR_NESTED_SCONTROL (HV_X64_MSR_NESTED_SCONTROL) +#define HV_MSR_NESTED_SVERSION (HV_X64_MSR_NESTED_SVERSION) +#define HV_MSR_NESTED_SIEFP (HV_X64_MSR_NESTED_SIEFP) +#define HV_MSR_NESTED_SIMP (HV_X64_MSR_NESTED_SIMP) +#define HV_MSR_NESTED_EOM (HV_X64_MSR_NESTED_EOM) +#define HV_MSR_NESTED_SINT0 (HV_X64_MSR_NESTED_SINT0) + +#define HV_MSR_STIMER0_CONFIG (HV_X64_MSR_STIMER0_CONFIG) +#define HV_MSR_STIMER0_COUNT (HV_X64_MSR_STIMER0_COUNT) + +#elif defined(CONFIG_ARM64) /* CONFIG_X86 */ + +#define HV_MSR_CRASH_P0 (HV_REGISTER_GUEST_CRASH_P0) +#define HV_MSR_CRASH_P1 (HV_REGISTER_GUEST_CRASH_P1) +#define HV_MSR_CRASH_P2 (HV_REGISTER_GUEST_CRASH_P2) +#define HV_MSR_CRASH_P3 (HV_REGISTER_GUEST_CRASH_P3) +#define HV_MSR_CRASH_P4 (HV_REGISTER_GUEST_CRASH_P4) +#define HV_MSR_CRASH_CTL (HV_REGISTER_GUEST_CRASH_CTL) + +#define HV_MSR_VP_INDEX (HV_REGISTER_VP_INDEX) +#define HV_MSR_TIME_REF_COUNT (HV_REGISTER_TIME_REF_COUNT) +#define HV_MSR_REFERENCE_TSC (HV_REGISTER_REFERENCE_TSC) + +#define HV_MSR_SINT0 (HV_REGISTER_SINT0) +#define HV_MSR_SCONTROL (HV_REGISTER_SCONTROL) +#define HV_MSR_SIEFP (HV_REGISTER_SIEFP) +#define HV_MSR_SIMP (HV_REGISTER_SIMP) +#define HV_MSR_EOM (HV_REGISTER_EOM) +#define HV_MSR_SIRBP (HV_REGISTER_SIRBP) + +#define HV_MSR_STIMER0_CONFIG (HV_REGISTER_STIMER0_CONFIG) +#define HV_MSR_STIMER0_COUNT (HV_REGISTER_STIMER0_COUNT) + +#endif /* CONFIG_ARM64 */ + +union hv_explicit_suspend_register { + u64 as_uint64; + struct { + u64 suspended : 1; + u64 reserved : 63; + } __packed; +}; + +union hv_intercept_suspend_register { + u64 as_uint64; + struct { + u64 suspended : 1; + u64 reserved : 63; + } __packed; +}; + +union hv_dispatch_suspend_register { + u64 as_uint64; + struct { + u64 suspended : 1; + u64 reserved : 63; + } __packed; +}; + +union hv_arm64_pending_interruption_register { + u64 as_uint64; + struct { + u64 interruption_pending : 1; + u64 interruption_type: 1; + u64 reserved : 30; + u64 error_code : 32; + } __packed; +}; + +union hv_arm64_interrupt_state_register { + u64 as_uint64; + struct { + u64 interrupt_shadow : 1; + u64 reserved : 63; + } __packed; +}; + +union 
hv_arm64_pending_synthetic_exception_event { + u64 as_uint64[2]; + struct { + u8 event_pending : 1; + u8 event_type : 3; + u8 reserved : 4; + u8 rsvd[3]; + u32 exception_type; + u64 context; + } __packed; +}; + +union hv_x64_interrupt_state_register { + u64 as_uint64; + struct { + u64 interrupt_shadow : 1; + u64 nmi_masked : 1; + u64 reserved : 62; + } __packed; +}; + +union hv_x64_pending_interruption_register { + u64 as_uint64; + struct { + u32 interruption_pending : 1; + u32 interruption_type : 3; + u32 deliver_error_code : 1; + u32 instruction_length : 4; + u32 nested_event : 1; + u32 reserved : 6; + u32 interruption_vector : 16; + u32 error_code; + } __packed; +}; + +union hv_register_value { + struct hv_u128 reg128; + u64 reg64; + u32 reg32; + u16 reg16; + u8 reg8; + + struct hv_x64_segment_register segment; + struct hv_x64_table_register table; + union hv_explicit_suspend_register explicit_suspend; + union hv_intercept_suspend_register intercept_suspend; + union hv_dispatch_suspend_register dispatch_suspend; +#ifdef CONFIG_ARM64 + union hv_arm64_interrupt_state_register interrupt_state; + union hv_arm64_pending_interruption_register pending_interruption; +#endif +#ifdef CONFIG_X86 + union hv_x64_interrupt_state_register interrupt_state; + union hv_x64_pending_interruption_register pending_interruption; +#endif + union hv_arm64_pending_synthetic_exception_event pending_synthetic_exception_event; +}; + +/* NOTE: Linux helper struct - NOT from Hyper-V code. */ +struct hv_output_get_vp_registers { + DECLARE_FLEX_ARRAY(union hv_register_value, values); +}; + +#if defined(CONFIG_ARM64) +/* HvGetVpRegisters returns an array of these output elements */ +struct hv_get_vp_registers_output { + union { + struct { + u32 a; + u32 b; + u32 c; + u32 d; + } as32 __packed; + struct { + u64 low; + u64 high; + } as64 __packed; + }; +}; + +#endif /* CONFIG_ARM64 */ + +struct hv_register_assoc { + u32 name; /* enum hv_register_name */ + u32 reserved1; + u64 reserved2; + union hv_register_value value; +} __packed; + +struct hv_input_get_vp_registers { + u64 partition_id; + u32 vp_index; + union hv_input_vtl input_vtl; + u8 rsvd_z8; + u16 rsvd_z16; + u32 names[]; +} __packed; + +struct hv_input_set_vp_registers { + u64 partition_id; + u32 vp_index; + union hv_input_vtl input_vtl; + u8 rsvd_z8; + u16 rsvd_z16; + struct hv_register_assoc elements[]; +} __packed; + +#define HV_UNMAP_GPA_LARGE_PAGE 0x2 + +/* HvCallSendSyntheticClusterIpi hypercall */ +struct hv_send_ipi { /* HV_INPUT_SEND_SYNTHETIC_CLUSTER_IPI */ + u32 vector; + u32 reserved; + u64 cpu_mask; +} __packed; + +#define HV_X64_VTL_MASK GENMASK(3, 0) + +/* Hyper-V memory host visibility */ +enum hv_mem_host_visibility { + VMBUS_PAGE_NOT_VISIBLE = 0, + VMBUS_PAGE_VISIBLE_READ_ONLY = 1, + VMBUS_PAGE_VISIBLE_READ_WRITE = 3 +}; + +/* HvCallModifySparseGpaPageHostVisibility hypercall */ +#define HV_MAX_MODIFY_GPA_REP_COUNT ((HV_HYP_PAGE_SIZE / sizeof(u64)) - 2) +struct hv_gpa_range_for_visibility { + u64 partition_id; + u32 host_visibility : 2; + u32 reserved0 : 30; + u32 reserved1; + u64 gpa_page_list[HV_MAX_MODIFY_GPA_REP_COUNT]; +} __packed; + +#if defined(CONFIG_X86) +union hv_msi_address_register { /* HV_MSI_ADDRESS */ + u32 as_uint32; + struct { + u32 reserved1 : 2; + u32 destination_mode : 1; + u32 redirection_hint : 1; + u32 reserved2 : 8; + u32 destination_id : 8; + u32 msi_base : 12; + }; +} __packed; + +union hv_msi_data_register { /* HV_MSI_ENTRY.Data */ + u32 as_uint32; + struct { + u32 vector : 8; + u32 delivery_mode : 3; + u32 reserved1 : 
3; + u32 level_assert : 1; + u32 trigger_mode : 1; + u32 reserved2 : 16; + }; +} __packed; + +union hv_msi_entry { /* HV_MSI_ENTRY */ + + u64 as_uint64; + struct { + union hv_msi_address_register address; + union hv_msi_data_register data; + } __packed; +}; + +#elif defined(CONFIG_ARM64) /* CONFIG_X86 */ + +union hv_msi_entry { + u64 as_uint64[2]; + struct { + u64 address; + u32 data; + u32 reserved; + } __packed; +}; +#endif /* CONFIG_ARM64 */ + +union hv_ioapic_rte { + u64 as_uint64; + + struct { + u32 vector : 8; + u32 delivery_mode : 3; + u32 destination_mode : 1; + u32 delivery_status : 1; + u32 interrupt_polarity : 1; + u32 remote_irr : 1; + u32 trigger_mode : 1; + u32 interrupt_mask : 1; + u32 reserved1 : 15; + + u32 reserved2 : 24; + u32 destination_id : 8; + }; + + struct { + u32 low_uint32; + u32 high_uint32; + }; +} __packed; + +enum hv_interrupt_source { /* HV_INTERRUPT_SOURCE */ + HV_INTERRUPT_SOURCE_MSI = 1, /* MSI and MSI-X */ + HV_INTERRUPT_SOURCE_IOAPIC, +}; + +struct hv_interrupt_entry { /* HV_INTERRUPT_ENTRY */ + u32 source; + u32 reserved1; + union { + union hv_msi_entry msi_entry; + union hv_ioapic_rte ioapic_rte; + }; +} __packed; + +#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1 +#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2 + +struct hv_device_interrupt_target { /* HV_DEVICE_INTERRUPT_TARGET */ + u32 vector; + u32 flags; /* HV_DEVICE_INTERRUPT_TARGET_* above */ + union { + u64 vp_mask; + struct hv_vpset vp_set; + }; +} __packed; + +struct hv_retarget_device_interrupt { /* HV_INPUT_RETARGET_DEVICE_INTERRUPT */ + u64 partition_id; /* use "self" */ + u64 device_id; + struct hv_interrupt_entry int_entry; + u64 reserved2; + struct hv_device_interrupt_target int_target; +} __packed __aligned(8); + +/* Data structures for HVCALL_MMIO_READ and HVCALL_MMIO_WRITE */ +#define HV_HYPERCALL_MMIO_MAX_DATA_LENGTH 64 + +struct hv_mmio_read_input { /* HV_INPUT_MEMORY_MAPPED_IO_READ */ + u64 gpa; + u32 size; + u32 reserved; +} __packed; + +struct hv_mmio_read_output { + u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH]; +} __packed; + +struct hv_mmio_write_input { + u64 gpa; + u32 size; + u32 reserved; + u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH]; +} __packed; + +#endif /* _HV_HVGDK_MINI_H */ diff --git a/include/hyperv/hvhdk.h b/include/hyperv/hvhdk.h new file mode 100644 index 000000000000..64407c2a3809 --- /dev/null +++ b/include/hyperv/hvhdk.h @@ -0,0 +1,733 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Type definitions for the Microsoft hypervisor. 
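The HVCALL_MMIO_READ/WRITE structures above cap each access at HV_HYPERCALL_MMIO_MAX_DATA_LENGTH bytes; the root partition uses them to reach MMIO space it cannot map directly. A sketch of a bounded read, assuming 'in' and 'hvout' point at the usual per-CPU hypercall input/output pages:

#include <linux/string.h>

static u64 mmio_read_sketch(u64 gpa, u32 size, u8 *out,
			    struct hv_mmio_read_input *in,
			    struct hv_mmio_read_output *hvout)
{
	u64 status;

	if (size > HV_HYPERCALL_MMIO_MAX_DATA_LENGTH)
		return HV_STATUS_INVALID_PARAMETER;

	in->gpa = gpa;
	in->size = size;
	in->reserved = 0;

	status = hv_do_hypercall(HVCALL_MMIO_READ, in, hvout);
	if ((status & HV_HYPERCALL_RESULT_MASK) == HV_STATUS_SUCCESS)
		memcpy(out, hvout->data, size);

	return status;
}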
+ */ +#ifndef _HV_HVHDK_H +#define _HV_HVHDK_H + +#include <linux/build_bug.h> + +#include "hvhdk_mini.h" +#include "hvgdk.h" + +/* Bits for dirty mask of hv_vp_register_page */ +#define HV_X64_REGISTER_CLASS_GENERAL 0 +#define HV_X64_REGISTER_CLASS_IP 1 +#define HV_X64_REGISTER_CLASS_XMM 2 +#define HV_X64_REGISTER_CLASS_SEGMENT 3 +#define HV_X64_REGISTER_CLASS_FLAGS 4 + +#define HV_VP_REGISTER_PAGE_VERSION_1 1u + +struct hv_vp_register_page { + u16 version; + u8 isvalid; + u8 rsvdz; + u32 dirty; + union { + struct { + /* General purpose registers + * (HV_X64_REGISTER_CLASS_GENERAL) + */ + union { + struct { + u64 rax; + u64 rcx; + u64 rdx; + u64 rbx; + u64 rsp; + u64 rbp; + u64 rsi; + u64 rdi; + u64 r8; + u64 r9; + u64 r10; + u64 r11; + u64 r12; + u64 r13; + u64 r14; + u64 r15; + } __packed; + + u64 gp_registers[16]; + }; + /* Instruction pointer (HV_X64_REGISTER_CLASS_IP) */ + u64 rip; + /* Flags (HV_X64_REGISTER_CLASS_FLAGS) */ + u64 rflags; + } __packed; + + u64 registers[18]; + }; + /* Volatile XMM registers (HV_X64_REGISTER_CLASS_XMM) */ + union { + struct { + struct hv_u128 xmm0; + struct hv_u128 xmm1; + struct hv_u128 xmm2; + struct hv_u128 xmm3; + struct hv_u128 xmm4; + struct hv_u128 xmm5; + } __packed; + + struct hv_u128 xmm_registers[6]; + }; + /* Segment registers (HV_X64_REGISTER_CLASS_SEGMENT) */ + union { + struct { + struct hv_x64_segment_register es; + struct hv_x64_segment_register cs; + struct hv_x64_segment_register ss; + struct hv_x64_segment_register ds; + struct hv_x64_segment_register fs; + struct hv_x64_segment_register gs; + } __packed; + + struct hv_x64_segment_register segment_registers[6]; + }; + /* Misc. control registers (cannot be set via this interface) */ + u64 cr0; + u64 cr3; + u64 cr4; + u64 cr8; + u64 efer; + u64 dr7; + union hv_x64_pending_interruption_register pending_interruption; + union hv_x64_interrupt_state_register interrupt_state; + u64 instruction_emulation_hints; +} __packed; + +#define HV_PARTITION_PROCESSOR_FEATURES_BANKS 2 + +union hv_partition_processor_features { + u64 as_uint64[HV_PARTITION_PROCESSOR_FEATURES_BANKS]; + struct { + u64 sse3_support : 1; + u64 lahf_sahf_support : 1; + u64 ssse3_support : 1; + u64 sse4_1_support : 1; + u64 sse4_2_support : 1; + u64 sse4a_support : 1; + u64 xop_support : 1; + u64 pop_cnt_support : 1; + u64 cmpxchg16b_support : 1; + u64 altmovcr8_support : 1; + u64 lzcnt_support : 1; + u64 mis_align_sse_support : 1; + u64 mmx_ext_support : 1; + u64 amd3dnow_support : 1; + u64 extended_amd3dnow_support : 1; + u64 page_1gb_support : 1; + u64 aes_support : 1; + u64 pclmulqdq_support : 1; + u64 pcid_support : 1; + u64 fma4_support : 1; + u64 f16c_support : 1; + u64 rd_rand_support : 1; + u64 rd_wr_fs_gs_support : 1; + u64 smep_support : 1; + u64 enhanced_fast_string_support : 1; + u64 bmi1_support : 1; + u64 bmi2_support : 1; + u64 hle_support_deprecated : 1; + u64 rtm_support_deprecated : 1; + u64 movbe_support : 1; + u64 npiep1_support : 1; + u64 dep_x87_fpu_save_support : 1; + u64 rd_seed_support : 1; + u64 adx_support : 1; + u64 intel_prefetch_support : 1; + u64 smap_support : 1; + u64 hle_support : 1; + u64 rtm_support : 1; + u64 rdtscp_support : 1; + u64 clflushopt_support : 1; + u64 clwb_support : 1; + u64 sha_support : 1; + u64 x87_pointers_saved_support : 1; + u64 invpcid_support : 1; + u64 ibrs_support : 1; + u64 stibp_support : 1; + u64 ibpb_support: 1; + u64 unrestricted_guest_support : 1; + u64 mdd_support : 1; + u64 fast_short_rep_mov_support : 1; + u64 l1dcache_flush_support : 1; + u64 
rdcl_no_support : 1; + u64 ibrs_all_support : 1; + u64 skip_l1df_support : 1; + u64 ssb_no_support : 1; + u64 rsb_a_no_support : 1; + u64 virt_spec_ctrl_support : 1; + u64 rd_pid_support : 1; + u64 umip_support : 1; + u64 mbs_no_support : 1; + u64 mb_clear_support : 1; + u64 taa_no_support : 1; + u64 tsx_ctrl_support : 1; + /* + * N.B. The final processor feature bit in bank 0 is reserved to + * simplify potential downlevel backports. + */ + u64 reserved_bank0 : 1; + + /* N.B. Begin bank 1 processor features. */ + u64 acount_mcount_support : 1; + u64 tsc_invariant_support : 1; + u64 cl_zero_support : 1; + u64 rdpru_support : 1; + u64 la57_support : 1; + u64 mbec_support : 1; + u64 nested_virt_support : 1; + u64 psfd_support : 1; + u64 cet_ss_support : 1; + u64 cet_ibt_support : 1; + u64 vmx_exception_inject_support : 1; + u64 enqcmd_support : 1; + u64 umwait_tpause_support : 1; + u64 movdiri_support : 1; + u64 movdir64b_support : 1; + u64 cldemote_support : 1; + u64 serialize_support : 1; + u64 tsc_deadline_tmr_support : 1; + u64 tsc_adjust_support : 1; + u64 fzlrep_movsb : 1; + u64 fsrep_stosb : 1; + u64 fsrep_cmpsb : 1; + u64 reserved_bank1 : 42; + } __packed; +}; + +union hv_partition_processor_xsave_features { + struct { + u64 xsave_support : 1; + u64 xsaveopt_support : 1; + u64 avx_support : 1; + u64 reserved1 : 61; + } __packed; + u64 as_uint64; +}; + +struct hv_partition_creation_properties { + union hv_partition_processor_features disabled_processor_features; + union hv_partition_processor_xsave_features + disabled_processor_xsave_features; +} __packed; + +#define HV_PARTITION_SYNTHETIC_PROCESSOR_FEATURES_BANKS 1 + +union hv_partition_synthetic_processor_features { + u64 as_uint64[HV_PARTITION_SYNTHETIC_PROCESSOR_FEATURES_BANKS]; + + struct { + u64 hypervisor_present : 1; + /* Support for HV#1: (CPUID leaves 0x40000000 - 0x40000006)*/ + u64 hv1 : 1; + u64 access_vp_run_time_reg : 1; /* HV_X64_MSR_VP_RUNTIME */ + u64 access_partition_reference_counter : 1; /* HV_X64_MSR_TIME_REF_COUNT */ + u64 access_synic_regs : 1; /* SINT-related registers */ + /* + * Access to HV_X64_MSR_STIMER0_CONFIG through + * HV_X64_MSR_STIMER3_COUNT. + */ + u64 access_synthetic_timer_regs : 1; + u64 access_intr_ctrl_regs : 1; /* APIC MSRs and VP assist page*/ + /* HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL */ + u64 access_hypercall_regs : 1; + u64 access_vp_index : 1; + u64 access_partition_reference_tsc : 1; + u64 access_guest_idle_reg : 1; + u64 access_frequency_regs : 1; + u64 reserved_z12 : 1; + u64 reserved_z13 : 1; + u64 reserved_z14 : 1; + u64 enable_extended_gva_ranges_for_flush_virtual_address_list : 1; + u64 reserved_z16 : 1; + u64 reserved_z17 : 1; + /* Use fast hypercall output. Corresponds to privilege. */ + u64 fast_hypercall_output : 1; + u64 reserved_z19 : 1; + u64 start_virtual_processor : 1; /* Can start VPs */ + u64 reserved_z21 : 1; + /* Synthetic timers in direct mode. */ + u64 direct_synthetic_timers : 1; + u64 reserved_z23 : 1; + u64 extended_processor_masks : 1; + + /* Enable various hypercalls */ + u64 tb_flush_hypercalls : 1; + u64 synthetic_cluster_ipi : 1; + u64 notify_long_spin_wait : 1; + u64 query_numa_distance : 1; + u64 signal_events : 1; + u64 retarget_device_interrupt : 1; + u64 restore_time : 1; + + /* EnlightenedVmcs nested enlightenment is supported. 
*/ + u64 enlightened_vmcs : 1; + u64 reserved : 31; + } __packed; +}; + +#define HV_MAKE_COMPATIBILITY_VERSION(major_, minor_) \ + ((u32)((major_) << 8 | (minor_))) + +#define HV_COMPATIBILITY_21_H2 HV_MAKE_COMPATIBILITY_VERSION(0X6, 0X9) + +union hv_partition_isolation_properties { + u64 as_uint64; + struct { + u64 isolation_type: 5; + u64 isolation_host_type : 2; + u64 rsvd_z: 5; + u64 shared_gpa_boundary_page_number: 52; + } __packed; +}; + +/* + * Various isolation types supported by MSHV. + */ +#define HV_PARTITION_ISOLATION_TYPE_NONE 0 +#define HV_PARTITION_ISOLATION_TYPE_SNP 2 +#define HV_PARTITION_ISOLATION_TYPE_TDX 3 + +/* + * Various host isolation types supported by MSHV. + */ +#define HV_PARTITION_ISOLATION_HOST_TYPE_NONE 0x0 +#define HV_PARTITION_ISOLATION_HOST_TYPE_HARDWARE 0x1 +#define HV_PARTITION_ISOLATION_HOST_TYPE_RESERVED 0x2 + +/* Note: Exo partition is enabled by default */ +#define HV_PARTITION_CREATION_FLAG_EXO_PARTITION BIT(8) +#define HV_PARTITION_CREATION_FLAG_LAPIC_ENABLED BIT(13) +#define HV_PARTITION_CREATION_FLAG_INTERCEPT_MESSAGE_PAGE_ENABLED BIT(19) +#define HV_PARTITION_CREATION_FLAG_X2APIC_CAPABLE BIT(22) + +struct hv_input_create_partition { + u64 flags; + struct hv_proximity_domain_info proximity_domain_info; + u32 compatibility_version; + u32 padding; + struct hv_partition_creation_properties partition_creation_properties; + union hv_partition_isolation_properties isolation_properties; +} __packed; + +struct hv_output_create_partition { + u64 partition_id; +} __packed; + +struct hv_input_initialize_partition { + u64 partition_id; +} __packed; + +struct hv_input_finalize_partition { + u64 partition_id; +} __packed; + +struct hv_input_delete_partition { + u64 partition_id; +} __packed; + +struct hv_input_get_partition_property { + u64 partition_id; + u32 property_code; /* enum hv_partition_property_code */ + u32 padding; +} __packed; + +struct hv_output_get_partition_property { + u64 property_value; +} __packed; + +struct hv_input_set_partition_property { + u64 partition_id; + u32 property_code; /* enum hv_partition_property_code */ + u32 padding; + u64 property_value; +} __packed; + +enum hv_vp_state_page_type { + HV_VP_STATE_PAGE_REGISTERS = 0, + HV_VP_STATE_PAGE_INTERCEPT_MESSAGE = 1, + HV_VP_STATE_PAGE_COUNT +}; + +struct hv_input_map_vp_state_page { + u64 partition_id; + u32 vp_index; + u32 type; /* enum hv_vp_state_page_type */ +} __packed; + +struct hv_output_map_vp_state_page { + u64 map_location; /* GPA page number */ +} __packed; + +struct hv_input_unmap_vp_state_page { + u64 partition_id; + u32 vp_index; + u32 type; /* enum hv_vp_state_page_type */ +} __packed; + +struct hv_opaque_intercept_message { + u32 vp_index; +} __packed; + +enum hv_port_type { + HV_PORT_TYPE_MESSAGE = 1, + HV_PORT_TYPE_EVENT = 2, + HV_PORT_TYPE_MONITOR = 3, + HV_PORT_TYPE_DOORBELL = 4 /* Root Partition only */ +}; + +struct hv_port_info { + u32 port_type; /* enum hv_port_type */ + u32 padding; + union { + struct { + u32 target_sint; + u32 target_vp; + u64 rsvdz; + } message_port_info; + struct { + u32 target_sint; + u32 target_vp; + u16 base_flag_number; + u16 flag_count; + u32 rsvdz; + } event_port_info; + struct { + u64 monitor_address; + u64 rsvdz; + } monitor_port_info; + struct { + u32 target_sint; + u32 target_vp; + u64 rsvdz; + } doorbell_port_info; + }; +} __packed; + +struct hv_connection_info { + u32 port_type; + u32 padding; + union { + struct { + u64 rsvdz; + } message_connection_info; + struct { + u64 rsvdz; + } event_connection_info; + struct { + u64 
monitor_address; + } monitor_connection_info; + struct { + u64 gpa; + u64 trigger_value; + u64 flags; + } doorbell_connection_info; + }; +} __packed; + +/* Define synthetic interrupt controller flag constants. */ +#define HV_EVENT_FLAGS_COUNT (256 * 8) +#define HV_EVENT_FLAGS_BYTE_COUNT (256) +#define HV_EVENT_FLAGS32_COUNT (256 / sizeof(u32)) + +/* linux side we create long version of flags to use long bit ops on flags */ +#define HV_EVENT_FLAGS_UL_COUNT (256 / sizeof(ulong)) + +/* Define the synthetic interrupt controller event flags format. */ +union hv_synic_event_flags { + unsigned char flags8[HV_EVENT_FLAGS_BYTE_COUNT]; + u32 flags32[HV_EVENT_FLAGS32_COUNT]; + ulong flags[HV_EVENT_FLAGS_UL_COUNT]; /* linux only */ +}; + +struct hv_synic_event_flags_page { + volatile union hv_synic_event_flags event_flags[HV_SYNIC_SINT_COUNT]; +}; + +#define HV_SYNIC_EVENT_RING_MESSAGE_COUNT 63 + +struct hv_synic_event_ring { + u8 signal_masked; + u8 ring_full; + u16 reserved_z; + u32 data[HV_SYNIC_EVENT_RING_MESSAGE_COUNT]; +} __packed; + +struct hv_synic_event_ring_page { + struct hv_synic_event_ring sint_event_ring[HV_SYNIC_SINT_COUNT]; +}; + +/* Define SynIC control register. */ +union hv_synic_scontrol { + u64 as_uint64; + struct { + u64 enable : 1; + u64 reserved : 63; + } __packed; +}; + +/* Define the format of the SIEFP register */ +union hv_synic_siefp { + u64 as_uint64; + struct { + u64 siefp_enabled : 1; + u64 preserved : 11; + u64 base_siefp_gpa : 52; + } __packed; +}; + +union hv_synic_sirbp { + u64 as_uint64; + struct { + u64 sirbp_enabled : 1; + u64 preserved : 11; + u64 base_sirbp_gpa : 52; + } __packed; +}; + +union hv_interrupt_control { + u64 as_uint64; + struct { + u32 interrupt_type; /* enum hv_interrupt_type */ + u32 level_triggered : 1; + u32 logical_dest_mode : 1; + u32 rsvd : 30; + } __packed; +}; + +struct hv_stimer_state { + struct { + u32 undelivered_msg_pending : 1; + u32 reserved : 31; + } __packed flags; + u32 resvd; + u64 config; + u64 count; + u64 adjustment; + u64 undelivered_exp_time; +} __packed; + +struct hv_synthetic_timers_state { + struct hv_stimer_state timers[HV_SYNIC_STIMER_COUNT]; + u64 reserved[5]; +} __packed; + +union hv_input_delete_vp { + u64 as_uint64[2]; + struct { + u64 partition_id; + u32 vp_index; + u8 reserved[4]; + } __packed; +} __packed; + +struct hv_input_assert_virtual_interrupt { + u64 partition_id; + union hv_interrupt_control control; + u64 dest_addr; /* cpu's apic id */ + u32 vector; + u8 target_vtl; + u8 rsvd_z0; + u16 rsvd_z1; +} __packed; + +struct hv_input_create_port { + u64 port_partition_id; + union hv_port_id port_id; + u8 port_vtl; + u8 min_connection_vtl; + u16 padding; + u64 connection_partition_id; + struct hv_port_info port_info; + struct hv_proximity_domain_info proximity_domain_info; +} __packed; + +union hv_input_delete_port { + u64 as_uint64[2]; + struct { + u64 port_partition_id; + union hv_port_id port_id; + u32 reserved; + }; +} __packed; + +struct hv_input_connect_port { + u64 connection_partition_id; + union hv_connection_id connection_id; + u8 connection_vtl; + u8 rsvdz0; + u16 rsvdz1; + u64 port_partition_id; + union hv_port_id port_id; + u32 reserved2; + struct hv_connection_info connection_info; + struct hv_proximity_domain_info proximity_domain_info; +} __packed; + +union hv_input_disconnect_port { + u64 as_uint64[2]; + struct { + u64 connection_partition_id; + union hv_connection_id connection_id; + u32 is_doorbell: 1; + u32 reserved: 31; + } __packed; +} __packed; + +union hv_input_notify_port_ring_empty { + 
u64 as_uint64; + struct { + u32 sint_index; + u32 reserved; + }; +} __packed; + +struct hv_vp_state_data_xsave { + u64 flags; + union hv_x64_xsave_xfem_register states; +} __packed; + +/* + * For getting and setting VP state, there are two options based on the state type: + * + * 1.) Data that is accessed by PFNs in the input hypercall page. This is used + * for state which may not fit into the hypercall pages. + * 2.) Data that is accessed directly in the input\output hypercall pages. + * This is used for state that will always fit into the hypercall pages. + * + * In the future this could be dynamic based on the size if needed. + * + * Note these hypercalls have an 8-byte aligned variable header size as per the tlfs + */ + +#define HV_GET_SET_VP_STATE_TYPE_PFN BIT(31) + +enum hv_get_set_vp_state_type { + /* HvGetSetVpStateLocalInterruptControllerState - APIC/GIC state */ + HV_GET_SET_VP_STATE_LAPIC_STATE = 0 | HV_GET_SET_VP_STATE_TYPE_PFN, + HV_GET_SET_VP_STATE_XSAVE = 1 | HV_GET_SET_VP_STATE_TYPE_PFN, + HV_GET_SET_VP_STATE_SIM_PAGE = 2 | HV_GET_SET_VP_STATE_TYPE_PFN, + HV_GET_SET_VP_STATE_SIEF_PAGE = 3 | HV_GET_SET_VP_STATE_TYPE_PFN, + HV_GET_SET_VP_STATE_SYNTHETIC_TIMERS = 4, +}; + +struct hv_vp_state_data { + u32 type; + u32 rsvd; + struct hv_vp_state_data_xsave xsave; +} __packed; + +struct hv_input_get_vp_state { + u64 partition_id; + u32 vp_index; + u8 input_vtl; + u8 rsvd0; + u16 rsvd1; + struct hv_vp_state_data state_data; + u64 output_data_pfns[]; +} __packed; + +union hv_output_get_vp_state { + struct hv_synthetic_timers_state synthetic_timers_state; +} __packed; + +union hv_input_set_vp_state_data { + u64 pfns; + u8 bytes; +} __packed; + +struct hv_input_set_vp_state { + u64 partition_id; + u32 vp_index; + u8 input_vtl; + u8 rsvd0; + u16 rsvd1; + struct hv_vp_state_data state_data; + union hv_input_set_vp_state_data data[]; +} __packed; + +/* + * Dispatch state for the VP communicated by the hypervisor to the + * VP-dispatching thread in the root on return from HVCALL_DISPATCH_VP. + */ +enum hv_vp_dispatch_state { + HV_VP_DISPATCH_STATE_INVALID = 0, + HV_VP_DISPATCH_STATE_BLOCKED = 1, + HV_VP_DISPATCH_STATE_READY = 2, +}; + +/* + * Dispatch event that caused the current dispatch state on return from + * HVCALL_DISPATCH_VP. 
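The dispatch-state and dispatch-event values around this comment drive the root scheduler's main loop: the thread backing a child VP calls HVCALL_DISPATCH_VP with a time slice, then branches on why control returned, using the input/output layouts defined just below. A hedged sketch; the 50ms slice is a placeholder, and intercept/blocking handling is elided:

#include <linux/string.h>

static void dispatch_vp_loop_sketch(u64 partition_id, u32 vp_index,
				    struct hv_input_dispatch_vp *in,
				    struct hv_output_dispatch_vp *out)
{
	for (;;) {
		memset(in, 0, sizeof(*in));
		in->partition_id = partition_id;
		in->vp_index = vp_index;
		in->time_slice = 500000;	/* 50 ms in 100ns units */

		if ((hv_do_hypercall(HVCALL_DISPATCH_VP, in, out) &
		     HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
			break;

		if (out->dispatch_state == HV_VP_DISPATCH_STATE_BLOCKED) {
			/* ... wait on the scheduler doorbell SINT ... */
		} else if (out->dispatch_event == HV_VP_DISPATCH_EVENT_INTERCEPT) {
			/* ... handle the intercept message, then loop ... */
		}
	}
}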
+ */ +enum hv_vp_dispatch_event { + HV_VP_DISPATCH_EVENT_INVALID = 0x00000000, + HV_VP_DISPATCH_EVENT_SUSPEND = 0x00000001, + HV_VP_DISPATCH_EVENT_INTERCEPT = 0x00000002, +}; + +#define HV_ROOT_SCHEDULER_MAX_VPS_PER_CHILD_PARTITION 1024 +/* The maximum array size of HV_GENERIC_SET (vp_set) buffer */ +#define HV_GENERIC_SET_QWORD_COUNT(max) (((((max) - 1) >> 6) + 1) + 2) + +struct hv_vp_signal_bitset_scheduler_message { + u64 partition_id; + u32 overflow_count; + u16 vp_count; + u16 reserved; + +#define BITSET_BUFFER_SIZE \ + HV_GENERIC_SET_QWORD_COUNT(HV_ROOT_SCHEDULER_MAX_VPS_PER_CHILD_PARTITION) + union { + struct hv_vpset bitset; + u64 bitset_buffer[BITSET_BUFFER_SIZE]; + } vp_bitset; +#undef BITSET_BUFFER_SIZE +} __packed; + +static_assert(sizeof(struct hv_vp_signal_bitset_scheduler_message) <= + (sizeof(struct hv_message) - sizeof(struct hv_message_header))); + +#define HV_MESSAGE_MAX_PARTITION_VP_PAIR_COUNT \ + (((sizeof(struct hv_message) - sizeof(struct hv_message_header)) / \ + (sizeof(u64 /* partition id */) + sizeof(u32 /* vp index */))) - 1) + +struct hv_vp_signal_pair_scheduler_message { + u32 overflow_count; + u8 vp_count; + u8 reserved1[3]; + + u64 partition_ids[HV_MESSAGE_MAX_PARTITION_VP_PAIR_COUNT]; + u32 vp_indexes[HV_MESSAGE_MAX_PARTITION_VP_PAIR_COUNT]; + + u8 reserved2[4]; +} __packed; + +static_assert(sizeof(struct hv_vp_signal_pair_scheduler_message) == + (sizeof(struct hv_message) - sizeof(struct hv_message_header))); + +/* Input and output structures for HVCALL_DISPATCH_VP */ +#define HV_DISPATCH_VP_FLAG_CLEAR_INTERCEPT_SUSPEND 0x1 +#define HV_DISPATCH_VP_FLAG_ENABLE_CALLER_INTERRUPTS 0x2 +#define HV_DISPATCH_VP_FLAG_SET_CALLER_SPEC_CTRL 0x4 +#define HV_DISPATCH_VP_FLAG_SKIP_VP_SPEC_FLUSH 0x8 +#define HV_DISPATCH_VP_FLAG_SKIP_CALLER_SPEC_FLUSH 0x10 +#define HV_DISPATCH_VP_FLAG_SKIP_CALLER_USER_SPEC_FLUSH 0x20 + +struct hv_input_dispatch_vp { + u64 partition_id; + u32 vp_index; + u32 flags; + u64 time_slice; /* in 100ns */ + u64 spec_ctrl; +} __packed; + +struct hv_output_dispatch_vp { + u32 dispatch_state; /* enum hv_vp_dispatch_state */ + u32 dispatch_event; /* enum hv_vp_dispatch_event */ +} __packed; + +#endif /* _HV_HVHDK_H */ diff --git a/include/hyperv/hvhdk_mini.h b/include/hyperv/hvhdk_mini.h new file mode 100644 index 000000000000..f8a39d3e9ce6 --- /dev/null +++ b/include/hyperv/hvhdk_mini.h @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Type definitions for the Microsoft Hypervisor. + */ +#ifndef _HV_HVHDK_MINI_H +#define _HV_HVHDK_MINI_H + +#include "hvgdk_mini.h" + +/* + * Doorbell connection_info flags. 
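A quick sanity check of the HV_GENERIC_SET_QWORD_COUNT() sizing used by the bitset scheduler message above: each qword of the vp_set bitmap covers 64 VPs, and two extra qwords account for the hv_vpset header (format and valid_bank_mask). For the root-scheduler maximum of 1024 VPs per child partition, that works out to ceil(1024 / 64) + 2 = 18 qwords:

/* (((1024 - 1) >> 6) + 1) + 2 == 16 + 2 == 18 */
static_assert(HV_GENERIC_SET_QWORD_COUNT(HV_ROOT_SCHEDULER_MAX_VPS_PER_CHILD_PARTITION) == 18);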
+ */ +#define HV_DOORBELL_FLAG_TRIGGER_SIZE_MASK 0x00000007 +#define HV_DOORBELL_FLAG_TRIGGER_SIZE_ANY 0x00000000 +#define HV_DOORBELL_FLAG_TRIGGER_SIZE_BYTE 0x00000001 +#define HV_DOORBELL_FLAG_TRIGGER_SIZE_WORD 0x00000002 +#define HV_DOORBELL_FLAG_TRIGGER_SIZE_DWORD 0x00000003 +#define HV_DOORBELL_FLAG_TRIGGER_SIZE_QWORD 0x00000004 +#define HV_DOORBELL_FLAG_TRIGGER_ANY_VALUE 0x80000000 + +/* Each generic set contains 64 elements */ +#define HV_GENERIC_SET_SHIFT (6) +#define HV_GENERIC_SET_MASK (63) + +enum hv_generic_set_format { + HV_GENERIC_SET_SPARSE_4K, + HV_GENERIC_SET_ALL, +}; +#define HV_GENERIC_SET_FORMAT hv_generic_set_format + +enum hv_scheduler_type { + HV_SCHEDULER_TYPE_LP = 1, /* Classic scheduler w/o SMT */ + HV_SCHEDULER_TYPE_LP_SMT = 2, /* Classic scheduler w/ SMT */ + HV_SCHEDULER_TYPE_CORE_SMT = 3, /* Core scheduler */ + HV_SCHEDULER_TYPE_ROOT = 4, /* Root / integrated scheduler */ + HV_SCHEDULER_TYPE_MAX +}; + +enum hv_partition_property_code { + /* Privilege properties */ + HV_PARTITION_PROPERTY_PRIVILEGE_FLAGS = 0x00010000, + HV_PARTITION_PROPERTY_SYNTHETIC_PROC_FEATURES = 0x00010001, + + /* Resource properties */ + HV_PARTITION_PROPERTY_GPA_PAGE_ACCESS_TRACKING = 0x00050005, + HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION = 0x00050017, + + /* Compatibility properties */ + HV_PARTITION_PROPERTY_PROCESSOR_XSAVE_FEATURES = 0x00060002, + HV_PARTITION_PROPERTY_MAX_XSAVE_DATA_SIZE = 0x00060008, + HV_PARTITION_PROPERTY_PROCESSOR_CLOCK_FREQUENCY = 0x00060009, +}; + +enum hv_system_property { + /* Add more values when needed */ + HV_SYSTEM_PROPERTY_SCHEDULER_TYPE = 15, +}; + +struct hv_input_get_system_property { + u32 property_id; /* enum hv_system_property */ + union { + u32 as_uint32; + /* More fields to be filled in when needed */ + }; +} __packed; + +struct hv_output_get_system_property { + union { + u32 scheduler_type; /* enum hv_scheduler_type */ + }; +} __packed; + +struct hv_proximity_domain_flags { + u32 proximity_preferred : 1; + u32 reserved : 30; + u32 proximity_info_valid : 1; +} __packed; + +struct hv_proximity_domain_info { + u32 domain_id; + struct hv_proximity_domain_flags flags; +} __packed; + +/* HvDepositMemory hypercall */ +struct hv_deposit_memory { /* HV_INPUT_DEPOSIT_MEMORY */ + u64 partition_id; + u64 gpa_page_list[]; +} __packed; + +struct hv_input_withdraw_memory { + u64 partition_id; + struct hv_proximity_domain_info proximity_domain_info; +} __packed; + +struct hv_output_withdraw_memory { + DECLARE_FLEX_ARRAY(u64, gpa_page_list); +} __packed; + +/* HV Map GPA (Guest Physical Address) Flags */ +#define HV_MAP_GPA_PERMISSIONS_NONE 0x0 +#define HV_MAP_GPA_READABLE 0x1 +#define HV_MAP_GPA_WRITABLE 0x2 +#define HV_MAP_GPA_KERNEL_EXECUTABLE 0x4 +#define HV_MAP_GPA_USER_EXECUTABLE 0x8 +#define HV_MAP_GPA_EXECUTABLE 0xC +#define HV_MAP_GPA_PERMISSIONS_MASK 0xF +#define HV_MAP_GPA_ADJUSTABLE 0x8000 +#define HV_MAP_GPA_NO_ACCESS 0x10000 +#define HV_MAP_GPA_NOT_CACHED 0x200000 +#define HV_MAP_GPA_LARGE_PAGE 0x80000000 + +struct hv_input_map_gpa_pages { + u64 target_partition_id; + u64 target_gpa_base; + u32 map_flags; + u32 padding; + u64 source_gpa_page_list[]; +} __packed; + +union hv_gpa_page_access_state_flags { + struct { + u64 clear_accessed : 1; + u64 set_accessed : 1; + u64 clear_dirty : 1; + u64 set_dirty : 1; + u64 reserved : 60; + } __packed; + u64 as_uint64; +}; + +struct hv_input_get_gpa_pages_access_state { + u64 partition_id; + union hv_gpa_page_access_state_flags flags; + u64 hv_gpa_page_number; +} __packed; + +union 
hv_gpa_page_access_state { + struct { + u8 accessed : 1; + u8 dirty : 1; + u8 reserved: 6; + }; + u8 as_uint8; +} __packed; + +struct hv_lp_startup_status { + u64 hv_status; + u64 substatus1; + u64 substatus2; + u64 substatus3; + u64 substatus4; + u64 substatus5; + u64 substatus6; +} __packed; + +struct hv_input_add_logical_processor { + u32 lp_index; + u32 apic_id; + struct hv_proximity_domain_info proximity_domain_info; +} __packed; + +struct hv_output_add_logical_processor { + struct hv_lp_startup_status startup_status; +} __packed; + +enum { /* HV_SUBNODE_TYPE */ + HV_SUBNODE_ANY = 0, + HV_SUBNODE_SOCKET, + HV_SUBNODE_CLUSTER, + HV_SUBNODE_L3, + HV_SUBNODE_COUNT, + HV_SUBNODE_INVALID = -1 +}; + +struct hv_create_vp { /* HV_INPUT_CREATE_VP */ + u64 partition_id; + u32 vp_index; + u8 padding[3]; + u8 subnode_type; + u64 subnode_id; + struct hv_proximity_domain_info proximity_domain_info; + u64 flags; +} __packed; + +/* HV_INTERRUPT_TRIGGER_MODE */ +enum hv_interrupt_trigger_mode { + HV_INTERRUPT_TRIGGER_MODE_EDGE = 0, + HV_INTERRUPT_TRIGGER_MODE_LEVEL = 1, +}; + +/* HV_DEVICE_INTERRUPT_DESCRIPTOR */ +struct hv_device_interrupt_descriptor { + u32 interrupt_type; + u32 trigger_mode; + u32 vector_count; + u32 reserved; + struct hv_device_interrupt_target target; +} __packed; + +/* HV_INPUT_MAP_DEVICE_INTERRUPT */ +struct hv_input_map_device_interrupt { + u64 partition_id; + u64 device_id; + u32 flags; + u32 base_irt_idx; + struct hv_interrupt_entry logical_interrupt_entry; + struct hv_device_interrupt_descriptor interrupt_descriptor; +} __packed; + +/* HV_OUTPUT_MAP_DEVICE_INTERRUPT */ +struct hv_output_map_device_interrupt { + struct hv_interrupt_entry interrupt_entry; +} __packed; + +/* HV_INPUT_UNMAP_DEVICE_INTERRUPT */ +struct hv_input_unmap_device_interrupt { + u64 partition_id; + u64 device_id; + struct hv_interrupt_entry interrupt_entry; + u32 flags; +} __packed; + +#define HV_SOURCE_SHADOW_NONE 0x0 +#define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1 + +struct hv_send_ipi_ex { /* HV_INPUT_SEND_SYNTHETIC_CLUSTER_IPI_EX */ + u32 vector; + u32 reserved; + struct hv_vpset vp_set; +} __packed; + +typedef u16 hv_pci_rid; /* HV_PCI_RID */ +typedef u16 hv_pci_segment; /* HV_PCI_SEGMENT */ +typedef u64 hv_logical_device_id; +union hv_pci_bdf { /* HV_PCI_BDF */ + u16 as_uint16; + + struct { + u8 function : 3; + u8 device : 5; + u8 bus; + }; +} __packed; + +union hv_pci_bus_range { + u16 as_uint16; + + struct { + u8 subordinate_bus; + u8 secondary_bus; + }; +} __packed; + +enum hv_device_type { /* HV_DEVICE_TYPE */ + HV_DEVICE_TYPE_LOGICAL = 0, + HV_DEVICE_TYPE_PCI = 1, + HV_DEVICE_TYPE_IOAPIC = 2, + HV_DEVICE_TYPE_ACPI = 3, +}; + +union hv_device_id { /* HV_DEVICE_ID */ + u64 as_uint64; + + struct { + u64 reserved0 : 62; + u64 device_type : 2; + }; + + /* HV_DEVICE_TYPE_LOGICAL */ + struct { + u64 id : 62; + u64 device_type : 2; + } logical; + + /* HV_DEVICE_TYPE_PCI */ + struct { + union { + hv_pci_rid rid; + union hv_pci_bdf bdf; + }; + + hv_pci_segment segment; + union hv_pci_bus_range shadow_bus_range; + + u16 phantom_function_bits : 2; + u16 source_shadow : 1; + + u16 rsvdz0 : 11; + u16 device_type : 2; + } pci; + + /* HV_DEVICE_TYPE_IOAPIC */ + struct { + u8 ioapic_id; + u8 rsvdz0; + u16 rsvdz1; + u16 rsvdz2; + + u16 rsvdz3 : 14; + u16 device_type : 2; + } ioapic; + + /* HV_DEVICE_TYPE_ACPI */ + struct { + u32 input_mapping_base; + u32 input_mapping_count : 30; + u32 device_type : 2; + } acpi; +} __packed; + +#endif /* _HV_HVHDK_MINI_H */ diff --git a/include/keys/system_keyring.h 
b/include/keys/system_keyring.h index 8365adf842ef..a6c2897bcc63 100644 --- a/include/keys/system_keyring.h +++ b/include/keys/system_keyring.h @@ -73,7 +73,6 @@ static inline void __init set_machine_trusted_keys(struct key *keyring) } #endif -extern struct pkcs7_message *pkcs7; #ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING extern int mark_hash_blacklisted(const u8 *hash, size_t hash_len, enum blacklist_hash_type hash_type); @@ -93,6 +92,7 @@ static inline int is_binary_blacklisted(const u8 *hash, size_t hash_len) } #endif +struct pkcs7_message; #ifdef CONFIG_SYSTEM_REVOCATION_LIST extern int add_key_to_revocation_list(const char *data, size_t size); extern int is_key_on_revocation_list(struct pkcs7_message *pkcs7); diff --git a/include/kunit/platform_device.h b/include/kunit/platform_device.h index 0fc0999d2420..f8236a8536f7 100644 --- a/include/kunit/platform_device.h +++ b/include/kunit/platform_device.h @@ -2,6 +2,7 @@ #ifndef _KUNIT_PLATFORM_DRIVER_H #define _KUNIT_PLATFORM_DRIVER_H +struct completion; struct kunit; struct platform_device; struct platform_driver; diff --git a/include/kunit/test.h b/include/kunit/test.h index 34b71e42fb10..58dbab60f853 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -312,6 +312,7 @@ static inline void kunit_set_failure(struct kunit *test) } bool kunit_enabled(void); +bool kunit_autorun(void); const char *kunit_action(void); const char *kunit_filter_glob(void); char *kunit_filter(void); @@ -334,7 +335,8 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, int *err); void kunit_free_suite_set(struct kunit_suite_set suite_set); -int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites); +int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites, + bool run_tests); void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 6adcd1b92b20..4e495b29c640 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -854,6 +854,11 @@ static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) return NULL; } +static inline acpi_handle acpi_device_handle(struct acpi_device *adev) +{ + return NULL; +} + static inline bool has_acpi_companion(struct device *dev) { return false; diff --git a/include/linux/adreno-smmu-priv.h b/include/linux/adreno-smmu-priv.h index c637e0997f6d..abec23c7744f 100644 --- a/include/linux/adreno-smmu-priv.h +++ b/include/linux/adreno-smmu-priv.h @@ -50,6 +50,11 @@ struct adreno_smmu_fault_info { * the GPU driver must call resume_translation() * @resume_translation: Resume translation after a fault * + * @set_prr_bit: [optional] Configure the GPU's Partially Resident + * Region (PRR) bit in the ACTLR register. + * @set_prr_addr: [optional] Configure the PRR_CFG_*ADDR register with + * the physical address of PRR page passed from GPU + * driver. * * The GPU driver (drm/msm) and adreno-smmu work together for controlling * the GPU's SMMU instance. 
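For a sense of how the two optional PRR callbacks documented above would be driven from the GPU driver side, a hedged sketch (the function is hypothetical; struct adreno_smmu_priv carries the cookie that is passed to every callback, and both callbacks must be NULL-checked because they are optional):

static void example_enable_prr(struct adreno_smmu_priv *priv,
			       phys_addr_t prr_page)
{
	/* Program the PRR page address first, then set the ACTLR bit. */
	if (priv->set_prr_addr)
		priv->set_prr_addr(priv->cookie, prr_page);
	if (priv->set_prr_bit)
		priv->set_prr_bit(priv->cookie, true);
}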
This is by necessity, as the GPU is directly @@ -67,6 +72,8 @@ struct adreno_smmu_priv { void (*get_fault_info)(const void *cookie, struct adreno_smmu_fault_info *info); void (*set_stall)(const void *cookie, bool enabled); void (*resume_translation)(const void *cookie, bool terminate); + void (*set_prr_bit)(const void *cookie, bool set); + void (*set_prr_addr)(const void *cookie, phys_addr_t page_addr); }; #endif /* __ADRENO_SMMU_PRIV_H */ diff --git a/include/linux/aer.h b/include/linux/aer.h index 4b97f38f3fcf..947b63091902 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h @@ -16,10 +16,18 @@ #define AER_CORRECTABLE 2 #define DPC_FATAL 3 +/* + * AER and DPC capabilities TLP Logging register sizes (PCIe r6.2, sec 7.8.4 + * & 7.9.14). + */ +#define PCIE_STD_NUM_TLP_HEADERLOG 4 +#define PCIE_STD_MAX_TLP_PREFIXLOG 4 + struct pci_dev; struct pcie_tlp_log { - u32 dw[4]; + u32 dw[PCIE_STD_NUM_TLP_HEADERLOG]; + u32 prefix[PCIE_STD_MAX_TLP_PREFIXLOG]; }; struct aer_capability_regs { @@ -37,8 +45,6 @@ struct aer_capability_regs { u16 uncor_err_source; }; -int pcie_read_tlp_log(struct pci_dev *dev, int where, struct pcie_tlp_log *log); - #if defined(CONFIG_PCIEAER) int pci_aer_clear_nonfatal_status(struct pci_dev *dev); int pcie_aer_is_native(struct pci_dev *dev); diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index 0bbbe537c5f9..a946e0203e6d 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -224,9 +224,14 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {} #define alloc_hooks_tag(_tag, _do_alloc) \ ({ \ - struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag); \ - typeof(_do_alloc) _res = _do_alloc; \ - alloc_tag_restore(_tag, _old); \ + typeof(_do_alloc) _res; \ + if (mem_alloc_profiling_enabled()) { \ + struct alloc_tag * __maybe_unused _old; \ + _old = alloc_tag_save(_tag); \ + _res = _do_alloc; \ + alloc_tag_restore(_tag, _old); \ + } else \ + _res = _do_alloc; \ _res; \ }) diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 2b90c48a6a87..062fbd4c9b77 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -31,11 +31,11 @@ struct amd_iommu_pi_data { struct task_struct; struct pci_dev; -extern int amd_iommu_detect(void); +extern void amd_iommu_detect(void); #else /* CONFIG_AMD_IOMMU */ -static inline int amd_iommu_detect(void) { return -ENODEV; } +static inline void amd_iommu_detect(void) { } #endif /* CONFIG_AMD_IOMMU */ diff --git a/include/linux/amd-pmf-io.h b/include/linux/amd-pmf-io.h index b4f818205216..6fa510f419c0 100644 --- a/include/linux/amd-pmf-io.h +++ b/include/linux/amd-pmf-io.h @@ -18,10 +18,12 @@ * enum sfh_message_type - Query the SFH message type * @MT_HPD: Message ID to know the Human presence info from MP2 FW * @MT_ALS: Message ID to know the Ambient light info from MP2 FW + * @MT_SRA: Message ID to know the SRA data from MP2 FW */ enum sfh_message_type { MT_HPD, MT_ALS, + MT_SRA, }; /** @@ -40,10 +42,23 @@ enum sfh_hpd_info { * struct amd_sfh_info - get HPD sensor info from MP2 FW * @ambient_light: Populates the ambient light information * @user_present: Populates the user presence information + * @platform_type: Operating modes (clamshell, flat, tent, etc.) 
+ * @laptop_placement: Device states (ontable, onlap, outbag) */ struct amd_sfh_info { u32 ambient_light; u8 user_present; + u32 platform_type; + u32 laptop_placement; +}; + +enum laptop_placement { + LP_UNKNOWN = 0, + ON_TABLE, + ON_LAP_MOTION, + IN_BAG, + OUT_OF_BAG, + LP_UNDEFINED, }; int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op); diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index e6c00e860951..60d674af3080 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -42,7 +42,14 @@ struct linux_binprm { * Set when errors can no longer be returned to the * original userspace. */ - point_of_no_return:1; + point_of_no_return:1, + /* Set when "comm" must come from the dentry. */ + comm_from_dentry:1, + /* + * Set by user space to check executability according to the + * caller's environment. + */ + is_check:1; struct file *executable; /* Executable to pass to the interpreter */ struct file *interpreter; struct file *file; diff --git a/include/linux/bio-integrity.h b/include/linux/bio-integrity.h index dbf0f74c1529..802f52e38efd 100644 --- a/include/linux/bio-integrity.h +++ b/include/linux/bio-integrity.h @@ -7,10 +7,12 @@ enum bip_flags { BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ - BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */ - BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */ - BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */ - BIP_COPY_USER = 1 << 5, /* Kernel bounce buffer in use */ + BIP_DISK_NOCHECK = 1 << 2, /* disable disk integrity checking */ + BIP_IP_CHECKSUM = 1 << 3, /* IP checksum */ + BIP_COPY_USER = 1 << 4, /* Kernel bounce buffer in use */ + BIP_CHECK_GUARD = 1 << 5, /* guard check */ + BIP_CHECK_REFTAG = 1 << 6, /* reftag check */ + BIP_CHECK_APPTAG = 1 << 7, /* apptag check */ }; struct bio_integrity_payload { @@ -21,6 +23,7 @@ struct bio_integrity_payload { unsigned short bip_vcnt; /* # of integrity bio_vecs */ unsigned short bip_max_vcnt; /* integrity bio_vec slots */ unsigned short bip_flags; /* control flags */ + u16 app_tag; /* application tag value */ struct bvec_iter bio_iter; /* for rewinding parent bio */ @@ -30,6 +33,9 @@ struct bio_integrity_payload { struct bio_vec bip_inline_vecs[];/* embedded bvec array */ }; +#define BIP_CLONE_FLAGS (BIP_MAPPED_INTEGRITY | BIP_IP_CHECKSUM | \ + BIP_CHECK_GUARD | BIP_CHECK_REFTAG | BIP_CHECK_APPTAG) + #ifdef CONFIG_BLK_DEV_INTEGRITY #define bip_for_each_vec(bvl, bip, iter) \ @@ -72,7 +78,8 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp, unsigned int nr); int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset); -int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len); +int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter); +int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta); void bio_integrity_unmap_user(struct bio *bio); bool bio_integrity_prep(struct bio *bio); void bio_integrity_advance(struct bio *bio, unsigned int bytes_done); @@ -98,8 +105,12 @@ static inline void bioset_integrity_free(struct bio_set *bs) { } -static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf, - ssize_t len) +static inline int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter) +{ + return -EINVAL; +} + +static inline int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta) { return -EINVAL; } diff --git 
a/include/linux/bio.h b/include/linux/bio.h index 7a1b3b1a8fed..4b79bf50f4f0 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -19,9 +19,6 @@ static inline unsigned int bio_max_segs(unsigned int nr_segs) return min(nr_segs, BIO_MAX_VECS); } -#define bio_prio(bio) (bio)->bi_ioprio -#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio) - #define bio_iter_iovec(bio, iter) \ bvec_iter_bvec((bio)->bi_io_vec, (iter)) @@ -416,8 +413,6 @@ int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len, unsigned off); bool __must_check bio_add_folio(struct bio *bio, struct folio *folio, size_t len, size_t off); -extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, - unsigned int, unsigned int); void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off); void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 262b6596eca5..2026953e2c4e 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -23,7 +23,7 @@ struct device; * * Function implementations generic to all architectures are in * lib/bitmap.c. Functions implementations that are architecture - * specific are in various include/asm-<arch>/bitops.h headers + * specific are in various arch/<arch>/include/asm/bitops.h headers * and other arch/<arch> specific files. * * See lib/bitmap.c for more details. diff --git a/include/linux/bitops.h b/include/linux/bitops.h index ba35bbf07798..c1cb53cf2f0f 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -230,6 +230,37 @@ static inline int get_count_order_long(unsigned long l) } /** + * parity8 - get the parity of an u8 value + * @value: the value to be examined + * + * Determine the parity of the u8 argument. + * + * Returns: + * 0 for even parity, 1 for odd parity + * + * Note: This function informs you about the current parity. Example to bail + * out when parity is odd: + * + * if (parity8(val) == 1) + * return -EBADMSG; + * + * If you need to calculate a parity bit, you need to draw the conclusion from + * this result yourself. 
Example to enforce odd parity, parity bit is bit 7: + * + * if (parity8(val) == 0) + * val ^= BIT(7); + */ +static inline int parity8(u8 val) +{ + /* + * One explanation of this algorithm: + * https://funloop.org/codex/problem/parity/README.html + */ + val ^= val >> 4; + return (0x6996 >> (val & 0xf)) & 1; +} + +/** * __ffs64 - find first set bit in a 64 bit word * @word: The 64 bit word * diff --git a/include/linux/bits.h b/include/linux/bits.h index 60044b608817..61a75d3f294b 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h @@ -20,9 +20,8 @@ */ #if !defined(__ASSEMBLY__) #include <linux/build_bug.h> -#define GENMASK_INPUT_CHECK(h, l) \ - (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \ - __is_constexpr((l) > (h)), (l) > (h), 0))) +#include <linux/compiler.h> +#define GENMASK_INPUT_CHECK(h, l) BUILD_BUG_ON_ZERO(const_true((l) > (h))) #else /* * BUILD_BUG_ON_ZERO is not available in h files included from asm files, diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h deleted file mode 100644 index ca544e1d3508..000000000000 --- a/include/linux/blk-mq-pci.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_BLK_MQ_PCI_H -#define _LINUX_BLK_MQ_PCI_H - -struct blk_mq_queue_map; -struct pci_dev; - -void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev, - int offset); - -#endif /* _LINUX_BLK_MQ_PCI_H */ diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h deleted file mode 100644 index 13226e9b22dd..000000000000 --- a/include/linux/blk-mq-virtio.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_BLK_MQ_VIRTIO_H -#define _LINUX_BLK_MQ_VIRTIO_H - -struct blk_mq_queue_map; -struct virtio_device; - -void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap, - struct virtio_device *vdev, int first_vec); - -#endif /* _LINUX_BLK_MQ_VIRTIO_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index c596e0e4cb75..a0a9007cc1e3 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -296,13 +296,6 @@ enum blk_eh_timer_return { BLK_EH_RESET_TIMER, }; -/* Keep alloc_policy_name[] in sync with the definitions below */ -enum { - BLK_TAG_ALLOC_FIFO, /* allocate starting from 0 */ - BLK_TAG_ALLOC_RR, /* allocate starting from last allocated tag */ - BLK_TAG_ALLOC_MAX -}; - /** * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware * block device @@ -668,7 +661,6 @@ struct blk_mq_ops { /* Keep hctx_flag_name[] in sync with the definitions below */ enum { - BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1, /* * Set when this device requires underlying blk-mq device for @@ -677,23 +669,20 @@ enum { BLK_MQ_F_STACKING = 1 << 2, BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3, BLK_MQ_F_BLOCKING = 1 << 4, - /* Do not allow an I/O scheduler to be configured. */ - BLK_MQ_F_NO_SCHED = 1 << 5, + + /* + * Alloc tags on a round-robin base instead of the first available one. + */ + BLK_MQ_F_TAG_RR = 1 << 5, /* * Select 'none' during queue registration in case of a single hwq * or shared hwqs instead of 'mq-deadline'. 
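Stepping back to the new parity8() helper in bitops.h above: the 0x6996 constant is a 16-entry lookup table, where bit n of 0x6996 holds the parity of the 4-bit value n. A worked example:

/* val = 0xb5 (five set bits, so odd parity):
 *   val ^= val >> 4;       0xb5 ^ 0x0b = 0xbe, low nibble 0xe
 *   (0x6996 >> 0xe) & 1;   bit 14 of 0x6996 is 1
 * so parity8(0xb5) == 1, as expected. */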
*/ BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 6, - BLK_MQ_F_ALLOC_POLICY_START_BIT = 7, - BLK_MQ_F_ALLOC_POLICY_BITS = 1, + + BLK_MQ_F_MAX = 1 << 7, }; -#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \ - ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \ - ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) -#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \ - ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \ - << BLK_MQ_F_ALLOC_POLICY_START_BIT) #define BLK_MQ_MAX_DEPTH (10240) #define BLK_MQ_NO_HCTX_IDX (-1U) @@ -921,6 +910,8 @@ void blk_mq_unfreeze_queue_non_owner(struct request_queue *q); void blk_freeze_queue_start_non_owner(struct request_queue *q); void blk_mq_map_queues(struct blk_mq_queue_map *qmap); +void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap, + struct device *dev, unsigned int offset); void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); void blk_mq_quiesce_queue_nowait(struct request_queue *q); @@ -977,14 +968,6 @@ static inline void blk_mq_cleanup_rq(struct request *rq) rq->q->mq_ops->cleanup_rq(rq); } -static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio, - unsigned int nr_segs) -{ - rq->nr_phys_segments = nr_segs; - rq->__data_len = bio->bi_iter.bi_size; - rq->bio = rq->biotail = bio; -} - void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx, struct lock_class_key *key); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 378d3a1a22fc..76f0a4e7c2e5 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -331,8 +331,8 @@ typedef unsigned int __bitwise blk_features_t; #define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \ ((__force blk_features_t)(1u << 15)) -/* stacked device can/does support atomic writes */ -#define BLK_FEAT_ATOMIC_WRITES_STACKED \ +/* atomic writes enabled */ +#define BLK_FEAT_ATOMIC_WRITES \ ((__force blk_features_t)(1u << 16)) /* @@ -581,6 +581,12 @@ struct request_queue { #ifdef CONFIG_LOCKDEP struct task_struct *mq_freeze_owner; int mq_freeze_owner_depth; + /* + * Records disk & queue state in current context, used in unfreeze + * queue + */ + bool mq_freeze_disk_dead; + bool mq_freeze_queue_dying; #endif wait_queue_head_t mq_freeze_wq; /* @@ -938,8 +944,7 @@ static inline unsigned int blk_boundary_sectors_left(sector_t offset, * the caller can modify. The caller must call queue_limits_commit_update() * to finish the update. * - * Context: process context. The caller must have frozen the queue or ensured - * that there is outstanding I/O by other means. + * Context: process context. 
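The relaxed "Context:" note above pairs with queue_limits_commit_update_frozen(), declared just below, which by its name and the dropped freezing requirement is expected to handle the queue freeze itself. A hedged sketch of the resulting pattern (q and the tweaked field stand in for a real driver's usage; error handling elided):

	struct queue_limits lim = queue_limits_start_update(q);

	lim.max_sectors = new_max;	/* hypothetical limit change */
	return queue_limits_commit_update_frozen(q, &lim);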
*/ static inline struct queue_limits queue_limits_start_update(struct request_queue *q) @@ -947,6 +952,8 @@ queue_limits_start_update(struct request_queue *q) mutex_lock(&q->limits_lock); return q->limits; } +int queue_limits_commit_update_frozen(struct request_queue *q, + struct queue_limits *lim); int queue_limits_commit_update(struct request_queue *q, struct queue_limits *lim); int queue_limits_set(struct request_queue *q, struct queue_limits *lim); @@ -1699,6 +1706,15 @@ struct io_comp_batch { void (*complete)(struct io_comp_batch *); }; +static inline bool blk_atomic_write_start_sect_aligned(sector_t sector, + struct queue_limits *limits) +{ + unsigned int alignment = max(limits->atomic_write_hw_unit_min, + limits->atomic_write_hw_boundary); + + return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT); +} + static inline bool bdev_can_atomic_write(struct block_device *bdev) { struct request_queue *bd_queue = bdev->bd_queue; @@ -1707,15 +1723,9 @@ static inline bool bdev_can_atomic_write(struct block_device *bdev) if (!limits->atomic_write_unit_min) return false; - if (bdev_is_partition(bdev)) { - sector_t bd_start_sect = bdev->bd_start_sect; - unsigned int alignment = - max(limits->atomic_write_unit_min, - limits->atomic_write_hw_boundary); - - if (!IS_ALIGNED(bd_start_sect, alignment >> SECTOR_SHIFT)) - return false; - } + if (bdev_is_partition(bdev)) + return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect, + limits); return true; } diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 6e63dd3443b9..f3f50e29d639 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2299,6 +2299,14 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu); struct bpf_map *bpf_map_get(u32 ufd); struct bpf_map *bpf_map_get_with_uref(u32 ufd); +/* + * The __bpf_map_get() and __btf_get_by_fd() functions parse a file + * descriptor and return a corresponding map or btf object. + * Their names are double underscored to emphasize the fact that they + * do not increase refcnt. To also increase refcnt use corresponding + * bpf_map_get() and btf_get_by_fd() functions. 
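To make the borrow-vs-own distinction drawn above concrete, a hedged sketch (ufd is an assumed file descriptor; use_map() is a stand-in for real work):

	struct fd f = fdget(ufd);
	struct bpf_map *map = __bpf_map_get(f);	/* borrowed, refcnt untouched */

	if (!IS_ERR(map))
		use_map(map);		/* valid only while f is held */
	fdput(f);

	map = bpf_map_get(ufd);		/* owned: refcnt bumped */
	if (!IS_ERR(map)) {
		use_map(map);		/* valid until released */
		bpf_map_put(map);
	}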
+ */ + static inline struct bpf_map *__bpf_map_get(struct fd f) { if (fd_empty(f)) @@ -2308,6 +2316,15 @@ static inline struct bpf_map *__bpf_map_get(struct fd f) return fd_file(f)->private_data; } +static inline struct btf *__btf_get_by_fd(struct fd f) +{ + if (fd_empty(f)) + return ERR_PTR(-EBADF); + if (unlikely(fd_file(f)->f_op != &btf_fops)) + return ERR_PTR(-EINVAL); + return fd_file(f)->private_data; +} + void bpf_map_inc(struct bpf_map *map); void bpf_map_inc_with_uref(struct bpf_map *map); struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref); @@ -2589,10 +2606,10 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf, int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx, struct bpf_map *map, bool exclude_ingress); int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, - struct bpf_prog *xdp_prog); + const struct bpf_prog *xdp_prog); int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, - struct bpf_prog *xdp_prog, struct bpf_map *map, - bool exclude_ingress); + const struct bpf_prog *xdp_prog, + struct bpf_map *map, bool exclude_ingress); void __cpu_map_flush(struct list_head *flush_list); int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf, @@ -2862,15 +2879,15 @@ struct sk_buff; static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, - struct bpf_prog *xdp_prog) + const struct bpf_prog *xdp_prog) { return 0; } static inline int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, - struct bpf_prog *xdp_prog, struct bpf_map *map, - bool exclude_ingress) + const struct bpf_prog *xdp_prog, + struct bpf_map *map, bool exclude_ingress) { return 0; } diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 48b7b2eeb7e2..32c23f2a3086 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -233,6 +233,7 @@ enum bpf_stack_slot_type { */ STACK_DYNPTR, STACK_ITER, + STACK_IRQ_FLAG, }; #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ @@ -254,8 +255,9 @@ struct bpf_reference_state { * default to pointer reference on zero initialization of a state. */ enum ref_state_type { - REF_TYPE_PTR = 0, - REF_TYPE_LOCK, + REF_TYPE_PTR = 1, + REF_TYPE_IRQ = 2, + REF_TYPE_LOCK = 3, } type; /* Track each reference created with a unique id, even if the same * instruction creates the reference multiple times (eg, via CALL). @@ -315,9 +317,6 @@ struct bpf_func_state { u32 callback_depth; /* The following fields should be last. See copy_func_state() */ - int acquired_refs; - int active_locks; - struct bpf_reference_state *refs; /* The state of the stack. Each element of the array describes BPF_REG_SIZE * (i.e. 8) bytes worth of stack memory. 
* stack[0] represents bytes [*(r10-8)..*(r10-1)] @@ -370,6 +369,8 @@ struct bpf_verifier_state { /* call stack tracking */ struct bpf_func_state *frame[MAX_CALL_FRAMES]; struct bpf_verifier_state *parent; + /* Acquired reference states */ + struct bpf_reference_state *refs; /* * 'branches' field is the number of branches left to explore: * 0 - all possible paths from this state reached bpf_exit or @@ -419,9 +420,13 @@ struct bpf_verifier_state { u32 insn_idx; u32 curframe; - bool speculative; + u32 acquired_refs; + u32 active_locks; + u32 active_preempt_locks; + u32 active_irq_id; bool active_rcu_lock; - u32 active_preempt_lock; + + bool speculative; /* If this state was ever pointed-to by other state's loop_entry field * this flag would be set to true. Used to avoid freeing such states * while they are still in use. @@ -980,8 +985,9 @@ const char *dynptr_type_str(enum bpf_dynptr_type type); const char *iter_type_str(const struct btf *btf, u32 btf_id); const char *iter_state_str(enum bpf_iter_state state); -void print_verifier_state(struct bpf_verifier_env *env, - const struct bpf_func_state *state, bool print_all); -void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state); +void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate, + u32 frameno, bool print_all); +void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate, + u32 frameno); #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/btf.h b/include/linux/btf.h index 4214e76c9168..2a08a2b55592 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -353,6 +353,11 @@ static inline bool btf_type_is_scalar(const struct btf_type *t) return btf_type_is_int(t) || btf_type_is_enum(t); } +static inline bool btf_type_is_fwd(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info) == BTF_KIND_FWD; +} + static inline bool btf_type_is_typedef(const struct btf_type *t) { return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF; diff --git a/include/linux/bug.h b/include/linux/bug.h index 348acf2558f3..a9948a9f1093 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -73,15 +73,23 @@ static inline void generic_bug_clear_once(void) {} #endif /* CONFIG_GENERIC_BUG */ +#ifdef CONFIG_PRINTK +void mem_dump_obj(void *object); +#else +static inline void mem_dump_obj(void *object) {} +#endif + /* * Since detected data corruption should stop operation on the affected * structures. Return value must be checked and sanely acted on by caller. */ static inline __must_check bool check_data_corruption(bool v) { return v; } -#define CHECK_DATA_CORRUPTION(condition, fmt, ...) \ +#define CHECK_DATA_CORRUPTION(condition, addr, fmt, ...) \ check_data_corruption(({ \ bool corruption = unlikely(condition); \ if (corruption) { \ + if (addr) \ + mem_dump_obj(addr); \ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ pr_err(fmt, ##__VA_ARGS__); \ BUG(); \ diff --git a/include/linux/bvec.h b/include/linux/bvec.h index f41c7f0ef91e..ba8f52d48b94 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -286,12 +286,7 @@ static inline void *bvec_virt(struct bio_vec *bvec) */ static inline phys_addr_t bvec_phys(const struct bio_vec *bvec) { - /* - * Note this open codes page_to_phys because page_to_phys is defined in - * <asm/io.h>, which we don't want to pull in here. If it ever moves to - * a sensible place we should start using it. 
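Returning to the CHECK_DATA_CORRUPTION() change in bug.h above: callers now pass the suspect object, so mem_dump_obj() can describe it before the report fires. A hedged sketch of a list-debug style caller (the node and message are illustrative):

	if (CHECK_DATA_CORRUPTION(node->next == NULL, node,
				  "corrupted list: next is NULL\n"))
		return false;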
- */ - return PFN_PHYS(page_to_pfn(bvec->bv_page)) + bvec->bv_offset; + return page_to_phys(bvec->bv_page) + bvec->bv_offset; } #endif /* __LINUX_BVEC_H */ diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 7ad736538649..c8f4f0a0b874 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -147,7 +147,7 @@ static inline int get_cpu_cacheinfo_id(int cpu, int level) return ci ? ci->id : -1; } -#ifdef CONFIG_ARM64 +#if defined(CONFIG_ARM64) || defined(CONFIG_ARM) #define use_arch_cache_info() (true) #else #define use_arch_cache_info() (false) diff --git a/include/linux/call_once.h b/include/linux/call_once.h new file mode 100644 index 000000000000..6261aa0b3fb0 --- /dev/null +++ b/include/linux/call_once.h @@ -0,0 +1,45 @@ +#ifndef _LINUX_CALL_ONCE_H +#define _LINUX_CALL_ONCE_H + +#include <linux/types.h> +#include <linux/mutex.h> + +#define ONCE_NOT_STARTED 0 +#define ONCE_RUNNING 1 +#define ONCE_COMPLETED 2 + +struct once { + atomic_t state; + struct mutex lock; +}; + +static inline void __once_init(struct once *once, const char *name, + struct lock_class_key *key) +{ + atomic_set(&once->state, ONCE_NOT_STARTED); + __mutex_init(&once->lock, name, key); +} + +#define once_init(once) \ +do { \ + static struct lock_class_key __key; \ + __once_init((once), #once, &__key); \ +} while (0) + +static inline void call_once(struct once *once, void (*cb)(struct once *)) +{ + /* Pairs with atomic_set_release() below. */ + if (atomic_read_acquire(&once->state) == ONCE_COMPLETED) + return; + + guard(mutex)(&once->lock); + WARN_ON(atomic_read(&once->state) == ONCE_RUNNING); + if (atomic_read(&once->state) != ONCE_NOT_STARTED) + return; + + atomic_set(&once->state, ONCE_RUNNING); + cb(once); + atomic_set_release(&once->state, ONCE_COMPLETED); +} + +#endif /* _LINUX_CALL_ONCE_H */ diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h index caa4b4430634..0bf7d33a1048 100644 --- a/include/linux/cc_platform.h +++ b/include/linux/cc_platform.h @@ -82,6 +82,14 @@ enum cc_attr { CC_ATTR_GUEST_SEV_SNP, /** + * @CC_ATTR_GUEST_SNP_SECURE_TSC: SNP Secure TSC is active. + * + * The platform/OS is running as a guest/virtual machine and actively + * using AMD SEV-SNP Secure TSC feature. + */ + CC_ATTR_GUEST_SNP_SECURE_TSC, + + /** * @CC_ATTR_HOST_SEV_SNP: AMD SNP enabled on the host. * * The host kernel is running with the necessary features diff --git a/include/linux/cgroup_dmem.h b/include/linux/cgroup_dmem.h new file mode 100644 index 000000000000..dd4869f1d736 --- /dev/null +++ b/include/linux/cgroup_dmem.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023-2024 Intel Corporation + */ + +#ifndef _CGROUP_DMEM_H +#define _CGROUP_DMEM_H + +#include <linux/types.h> +#include <linux/llist.h> + +struct dmem_cgroup_pool_state; + +/* Opaque definition of a cgroup region, used internally */ +struct dmem_cgroup_region; + +#if IS_ENABLED(CONFIG_CGROUP_DMEM) +struct dmem_cgroup_region *dmem_cgroup_register_region(u64 size, const char *name_fmt, ...) 
__printf(2,3); +void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region); +int dmem_cgroup_try_charge(struct dmem_cgroup_region *region, u64 size, + struct dmem_cgroup_pool_state **ret_pool, + struct dmem_cgroup_pool_state **ret_limit_pool); +void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size); +bool dmem_cgroup_state_evict_valuable(struct dmem_cgroup_pool_state *limit_pool, + struct dmem_cgroup_pool_state *test_pool, + bool ignore_low, bool *ret_hit_low); + +void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool); +#else +static inline __printf(2,3) struct dmem_cgroup_region * +dmem_cgroup_register_region(u64 size, const char *name_fmt, ...) +{ + return NULL; +} + +static inline void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region) +{ } + +static inline int dmem_cgroup_try_charge(struct dmem_cgroup_region *region, u64 size, + struct dmem_cgroup_pool_state **ret_pool, + struct dmem_cgroup_pool_state **ret_limit_pool) +{ + *ret_pool = NULL; + + if (ret_limit_pool) + *ret_limit_pool = NULL; + + return 0; +} + +static inline void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size) +{ } + +static inline +bool dmem_cgroup_state_evict_valuable(struct dmem_cgroup_pool_state *limit_pool, + struct dmem_cgroup_pool_state *test_pool, + bool ignore_low, bool *ret_hit_low) +{ + return true; +} + +static inline void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool) +{ } + +#endif +#endif /* _CGROUP_DMEM_H */ diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 445235487230..3fd0bcbf3080 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -65,6 +65,10 @@ SUBSYS(rdma) SUBSYS(misc) #endif +#if IS_ENABLED(CONFIG_CGROUP_DMEM) +SUBSYS(dmem) +#endif + /* * The following subsystems are not supported on the default hierarchy. */ diff --git a/include/linux/clk.h b/include/linux/clk.h index 1dcee6d701e4..b607482ca77e 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -1138,15 +1138,6 @@ static inline void clk_restore_context(void) {} #endif -/* Deprecated. Use devm_clk_bulk_get_all_enabled() */ -static inline int __must_check -devm_clk_bulk_get_all_enable(struct device *dev, struct clk_bulk_data **clks) -{ - int ret = devm_clk_bulk_get_all_enabled(dev, clks); - - return ret > 0 ? 0 : ret; -} - /* clk_prepare_enable helps cases using clk_enable in non-atomic context. */ static inline int clk_prepare_enable(struct clk *clk) { diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index d0ed9583743f..c9b58188ec61 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -52,18 +52,6 @@ */ #define barrier_before_unreachable() asm volatile("") -/* - * Mark a position in code as unreachable. This can be used to - * suppress control flow warnings after asm blocks that transfer - * control elsewhere. - */ -#define unreachable() \ - do { \ - annotate_unreachable(); \ - barrier_before_unreachable(); \ - __builtin_unreachable(); \ - } while (0) - #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 240c632c5b95..b087de2f3e94 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -109,44 +109,21 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, /* Unreachable code */ #ifdef CONFIG_OBJTOOL -/* - * These macros help objtool understand GCC code flow for unreachable code. 
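Looking back at the dmem cgroup interface introduced above, a hedged sketch of the charge/uncharge lifecycle a device driver might follow (the region name and sizes are made up, eviction and pool-reference handling are elided, and the NULL ret_limit_pool mirrors what the !CONFIG_CGROUP_DMEM stub tolerates):

	struct dmem_cgroup_region *region;
	struct dmem_cgroup_pool_state *pool;
	int ret;

	region = dmem_cgroup_register_region(8ULL << 30, "example/vram%u", 0);
	if (IS_ERR_OR_NULL(region))
		return region ? PTR_ERR(region) : 0;

	ret = dmem_cgroup_try_charge(region, 4ULL << 20, &pool, NULL);
	if (ret)
		return ret;
	/* ... allocation proceeds under the 4 MiB charge ... */
	dmem_cgroup_uncharge(pool, 4ULL << 20);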
- * The __COUNTER__ based labels are a hack to make each instance of the macros - * unique, to convince GCC not to merge duplicate inline asm statements. - */ -#define __stringify_label(n) #n - -#define __annotate_reachable(c) ({ \ - asm volatile(__stringify_label(c) ":\n\t" \ - ".pushsection .discard.reachable\n\t" \ - ".long " __stringify_label(c) "b - .\n\t" \ - ".popsection\n\t"); \ -}) -#define annotate_reachable() __annotate_reachable(__COUNTER__) - -#define __annotate_unreachable(c) ({ \ - asm volatile(__stringify_label(c) ":\n\t" \ - ".pushsection .discard.unreachable\n\t" \ - ".long " __stringify_label(c) "b - .\n\t" \ - ".popsection\n\t" : : "i" (c)); \ -}) -#define annotate_unreachable() __annotate_unreachable(__COUNTER__) - /* Annotate a C jump table to allow objtool to follow the code flow */ #define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #") - #else /* !CONFIG_OBJTOOL */ -#define annotate_reachable() -#define annotate_unreachable() #define __annotate_jump_table #endif /* CONFIG_OBJTOOL */ -#ifndef unreachable -# define unreachable() do { \ - annotate_unreachable(); \ +/* + * Mark a position in code as unreachable. This can be used to + * suppress control flow warnings after asm blocks that transfer + * control elsewhere. + */ +#define unreachable() do { \ + barrier_before_unreachable(); \ __builtin_unreachable(); \ } while (0) -#endif /* * KENTRY - kernel entry point @@ -331,6 +308,28 @@ static inline void *offset_to_ptr(const int *off) #define statically_true(x) (__builtin_constant_p(x) && (x)) /* + * Similar to statically_true() but produces a constant expression + * + * To be used in conjunction with macros, such as BUILD_BUG_ON_ZERO(), + * which require their input to be a constant expression and for which + * statically_true() would otherwise fail. + * + * This is a trade-off: const_true() requires all its operands to be + * compile-time constants. Else, it would always return false even on + * the most trivial cases like: + * + * true || non_const_var + * + * By contrast, statically_true() is able to fold more complex + * tautologies and will return true on expressions such as: + * + * !(non_const_var * 8 % 4) + * + * For the general case, statically_true() is better. + */ +#define const_true(x) __builtin_choose_expr(__is_constexpr(x), x, false) + /* * This is needed in functions which generate the stack canary, see * arch/x86/kernel/smpboot.c::start_secondary() for an example. */ diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 45e598fe3476..77e6e195d1d6 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -52,8 +52,8 @@ extern void do_coredump(const kernel_siginfo_t *siginfo); #define __COREDUMP_PRINTK(Level, Format, ...) \ do { \ char comm[TASK_COMM_LEN]; \ - \ - get_task_comm(comm, current); \ + /* This will always be NUL terminated. */ \ + memcpy(comm, current->comm, sizeof(comm)); \ printk_ratelimited(Level "coredump: %d(%*pE): " Format "\n", \ task_tgid_vnr(current), (int)strlen(comm), comm, ##__VA_ARGS__); \ } while (0) \ diff --git a/include/linux/coresight.h b/include/linux/coresight.h index c13342594278..17276965ff1d 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -172,6 +172,9 @@ struct coresight_desc { * @dest_dev: a @coresight_device representation of the component connected to @src_port. NULL until the device is created * @link: Representation of the connection as a sysfs link. + * @filter_src_fwnode: filter source component's fwnode handle.
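Circling back to const_true() above: together with the bits.h GENMASK_INPUT_CHECK() change earlier in this series, it turns a swapped GENMASK() into a hard build error while staying silent for runtime bounds, precisely because const_true() only folds fully-constant expressions:

	u32 ok = GENMASK(5, 3);	/* bits 5..3: 0x38 */
	/* GENMASK(3, 5) is now a build error: const_true((5) > (3)) folds
	 * to true and BUILD_BUG_ON_ZERO() trips. With non-constant bounds,
	 * const_true() yields false and the check quietly stands down. */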
+ * @filter_src_dev: a @coresight_device representation of the component that + needs to be filtered. * * The full connection structure looks like this, where in_conns store * references to same connection as the source device's out_conns. @@ -200,8 +203,10 @@ struct coresight_connection { struct coresight_device *dest_dev; struct coresight_sysfs_link *link; struct coresight_device *src_dev; - atomic_t src_refcnt; - atomic_t dest_refcnt; + struct fwnode_handle *filter_src_fwnode; + struct coresight_device *filter_src_dev; + int src_refcnt; + int dest_refcnt; }; /** @@ -588,9 +593,14 @@ static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 o } #endif /* CONFIG_64BIT */ +static inline bool coresight_is_device_source(struct coresight_device *csdev) +{ + return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE); +} + static inline bool coresight_is_percpu_source(struct coresight_device *csdev) { - return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) && + return csdev && coresight_is_device_source(csdev) && (csdev->subtype.source_subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_PROC); } @@ -662,6 +672,7 @@ void coresight_relaxed_write64(struct coresight_device *csdev, void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset); extern int coresight_get_cpu(struct device *dev); +extern int coresight_get_static_trace_id(struct device *dev, u32 *id); struct coresight_platform_data *coresight_get_platform_data(struct device *dev); struct coresight_connection * diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index a04b73c40173..6cc5e484547c 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -240,6 +240,7 @@ enum cpuhp_state { CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RANDOM_ONLINE, CPUHP_AP_RCUTREE_ONLINE, + CPUHP_AP_KTHREADS_ONLINE, CPUHP_AP_BASE_CACHEINFO_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 40, diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 9278a50d514f..36a890d0dd57 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -391,7 +391,7 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits) /** - * cpumask_any_but - return a "random" in a cpumask, but not this one. + * cpumask_any_but - return an arbitrary cpu in a cpumask, but not this one. * @mask: the cpumask to search * @cpu: the cpu to ignore. * @@ -411,7 +411,7 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) } /** - * cpumask_any_and_but - pick a "random" cpu from *mask1 & *mask2, but not this one. + * cpumask_any_and_but - pick an arbitrary cpu from *mask1 & *mask2, but not this one. * @mask1: the first input cpumask * @mask2: the second input cpumask * @cpu: the cpu to ignore @@ -840,7 +840,7 @@ void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp) } /** - * cpumask_any - pick a "random" cpu from *srcp + * cpumask_any - pick an arbitrary cpu from *srcp * @srcp: the input cpumask * * Return: >= nr_cpu_ids if no cpus set. 
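The "arbitrary, not random" wording in the cpumask kernel-doc fixes above is worth a small illustration: cpumask_any_but() is cheap and deterministic (in the current implementation it returns the first eligible set bit). A sketch of picking a different online CPU to offload work to, with the fallback being the caller's choice:

	unsigned int other = cpumask_any_but(cpu_online_mask, cpu);

	if (other >= nr_cpu_ids)
		other = cpu;	/* no other CPU online; run locally */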
@@ -848,7 +848,7 @@ void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp) #define cpumask_any(srcp) cpumask_first(srcp) /** - * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2 + * cpumask_any_and - pick an arbitrary cpu from *mask1 & *mask2 * @mask1: the first input cpumask * @mask2: the second input cpumask * @@ -1043,7 +1043,6 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); /* Wrappers for arch boot code to manipulate normally-constant masks */ void init_cpu_present(const struct cpumask *src); void init_cpu_possible(const struct cpumask *src); -void init_cpu_online(const struct cpumask *src); #define assign_cpu(cpu, mask, val) \ assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val)) diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index acc55626afdc..2f2555e6407c 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -20,6 +20,8 @@ extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size); extern void elfcorehdr_free(unsigned long long addr); extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos); extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos); +void elfcorehdr_fill_device_ram_ptload_elf64(Elf64_Phdr *phdr, + unsigned long long paddr, unsigned long long size); extern int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot); @@ -99,6 +101,12 @@ static inline void vmcore_unusable(void) * indicated in the vmcore instead. For example, a ballooned page * contains no data and reading from such a page will cause high * load in the hypervisor. + * @get_device_ram: query RAM ranges that can only be detected by device + * drivers, such as the virtio-mem driver, so they can be included in + * the crash dump on architectures that allocate the elfcore hdr in the dump + * ("2nd") kernel. Indicated RAM ranges may contain holes to reduce the + * total number of ranges; such holes can be detected using the pfn_is_ram + * callback just like for other RAM. * @next: List head to manage registered callbacks internally; initialized by * register_vmcore_cb(). * @@ -109,11 +117,44 @@ static inline void vmcore_unusable(void) */ struct vmcore_cb { bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn); + int (*get_device_ram)(struct vmcore_cb *cb, struct list_head *list); struct list_head next; }; extern void register_vmcore_cb(struct vmcore_cb *cb); extern void unregister_vmcore_cb(struct vmcore_cb *cb); +struct vmcore_range { + struct list_head list; + unsigned long long paddr; + unsigned long long size; + loff_t offset; +}; + +/* Allocate a vmcore range and add it to the list. */ +static inline int vmcore_alloc_add_range(struct list_head *list, + unsigned long long paddr, unsigned long long size) +{ + struct vmcore_range *m = kzalloc(sizeof(*m), GFP_KERNEL); + + if (!m) + return -ENOMEM; + m->paddr = paddr; + m->size = size; + list_add_tail(&m->list, list); + return 0; +} + +/* Free a list of vmcore ranges. 
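A hedged sketch of how a vmcore_cb implementation might use vmcore_alloc_add_range() above from its get_device_ram callback (the driver, base address, and size are hypothetical; SZ_1G comes from linux/sizes.h):

	static int example_get_device_ram(struct vmcore_cb *cb,
					  struct list_head *list)
	{
		/* Report one device-managed RAM range for the vmcore. */
		return vmcore_alloc_add_range(list, 0x100000000ULL, SZ_1G);
	}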
*/ +static inline void vmcore_free_ranges(struct list_head *list) +{ + struct vmcore_range *m, *tmp; + + list_for_each_entry_safe(m, tmp, list, list) { + list_del(&m->list); + kfree(m); + } +} + #else /* !CONFIG_CRASH_DUMP */ static inline bool is_kdump_kernel(void) { return false; } #endif /* CONFIG_CRASH_DUMP */ diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h index 6bb0c0bf357b..16787c1cee21 100644 --- a/include/linux/crc-t10dif.h +++ b/include/linux/crc-t10dif.h @@ -6,11 +6,29 @@ #define CRC_T10DIF_DIGEST_SIZE 2 #define CRC_T10DIF_BLOCK_SIZE 1 -#define CRC_T10DIF_STRING "crct10dif" -extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, - size_t len); -extern __u16 crc_t10dif(unsigned char const *, size_t); -extern __u16 crc_t10dif_update(__u16 crc, unsigned char const *, size_t); +u16 crc_t10dif_arch(u16 crc, const u8 *p, size_t len); +u16 crc_t10dif_generic(u16 crc, const u8 *p, size_t len); + +static inline u16 crc_t10dif_update(u16 crc, const u8 *p, size_t len) +{ + if (IS_ENABLED(CONFIG_CRC_T10DIF_ARCH)) + return crc_t10dif_arch(crc, p, len); + return crc_t10dif_generic(crc, p, len); +} + +static inline u16 crc_t10dif(const u8 *p, size_t len) +{ + return crc_t10dif_update(0, p, len); +} + +#if IS_ENABLED(CONFIG_CRC_T10DIF_ARCH) +bool crc_t10dif_is_optimized(void); +#else +static inline bool crc_t10dif_is_optimized(void) +{ + return false; +} +#endif #endif diff --git a/include/linux/crc32.h b/include/linux/crc32.h index 87f788c0d607..e9bd40056687 100644 --- a/include/linux/crc32.h +++ b/include/linux/crc32.h @@ -8,10 +8,49 @@ #include <linux/types.h> #include <linux/bitrev.h> -u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len); -u32 __pure crc32_le_base(u32 crc, unsigned char const *p, size_t len); -u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len); -u32 __pure crc32_be_base(u32 crc, unsigned char const *p, size_t len); +u32 __pure crc32_le_arch(u32 crc, const u8 *p, size_t len); +u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len); +u32 __pure crc32_be_arch(u32 crc, const u8 *p, size_t len); +u32 __pure crc32_be_base(u32 crc, const u8 *p, size_t len); +u32 __pure crc32c_le_arch(u32 crc, const u8 *p, size_t len); +u32 __pure crc32c_le_base(u32 crc, const u8 *p, size_t len); + +static inline u32 __pure crc32_le(u32 crc, const u8 *p, size_t len) +{ + if (IS_ENABLED(CONFIG_CRC32_ARCH)) + return crc32_le_arch(crc, p, len); + return crc32_le_base(crc, p, len); +} + +static inline u32 __pure crc32_be(u32 crc, const u8 *p, size_t len) +{ + if (IS_ENABLED(CONFIG_CRC32_ARCH)) + return crc32_be_arch(crc, p, len); + return crc32_be_base(crc, p, len); +} + +/* TODO: leading underscores should be dropped once callers have been updated */ +static inline u32 __pure __crc32c_le(u32 crc, const u8 *p, size_t len) +{ + if (IS_ENABLED(CONFIG_CRC32_ARCH)) + return crc32c_le_arch(crc, p, len); + return crc32c_le_base(crc, p, len); +} + +/* + * crc32_optimizations() returns flags that indicate which CRC32 library + * functions are using architecture-specific optimizations. Unlike + * IS_ENABLED(CONFIG_CRC32_ARCH) it takes into account the different CRC32 + * variants and also whether any needed CPU features are available at runtime. 
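To see what the new CRC dispatch above buys in practice: callers keep the same entry points, the IS_ENABLED() tests select the arch backend at compile time, and the optimization flags defined just below let code ask what is actually accelerated at runtime. A short hedged sketch (buf and len are assumed):

	u32 crc = crc32_le(0, buf, len);

	if (crc32_optimizations() & CRC32C_OPTIMIZATION)
		pr_info("crc32c uses an architecture-optimized implementation\n");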
+ */ +#define CRC32_LE_OPTIMIZATION BIT(0) /* crc32_le() is optimized */ +#define CRC32_BE_OPTIMIZATION BIT(1) /* crc32_be() is optimized */ +#define CRC32C_OPTIMIZATION BIT(2) /* __crc32c_le() is optimized */ +#if IS_ENABLED(CONFIG_CRC32_ARCH) +u32 crc32_optimizations(void); +#else +static inline u32 crc32_optimizations(void) { return 0; } +#endif /** * crc32_le_combine - Combine two crc32 check values into one. For two @@ -38,9 +77,6 @@ static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2) return crc32_le_shift(crc1, len2) ^ crc2; } -u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len); -u32 __pure __crc32c_le_base(u32 crc, unsigned char const *p, size_t len); - /** * __crc32c_le_combine - Combine two crc32c check values into one. For two * sequences of bytes, seq1 and seq2 with lengths len1 diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h index 357ae4611a45..47eb78003c26 100644 --- a/include/linux/crc32c.h +++ b/include/linux/crc32c.h @@ -2,9 +2,12 @@ #ifndef _LINUX_CRC32C_H #define _LINUX_CRC32C_H -#include <linux/types.h> +#include <linux/crc32.h> -extern u32 crc32c(u32 crc, const void *address, unsigned int length); +static inline u32 crc32c(u32 crc, const void *address, unsigned int length) +{ + return __crc32c_le(crc, address, length); +} /* This macro exists for backwards-compatibility. */ #define crc32c_le crc32c diff --git a/include/linux/cred.h b/include/linux/cred.h index e4a3155fe409..0c3c4b16b469 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -155,8 +155,6 @@ extern struct cred *prepare_creds(void); extern struct cred *prepare_exec_creds(void); extern int commit_creds(struct cred *); extern void abort_creds(struct cred *); -extern const struct cred *override_creds(const struct cred *); -extern void revert_creds(const struct cred *); extern struct cred *prepare_kernel_cred(struct task_struct *); extern int set_security_override(struct cred *, u32); extern int set_security_override_from_ctx(struct cred *, const char *); @@ -172,12 +170,7 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred) cred->cap_inheritable)); } -/* - * Override creds without bumping reference count. Caller must ensure - * reference remains valid or has taken reference. Almost always not the - * interface you want. Use override_creds()/revert_creds() instead. - */ -static inline const struct cred *override_creds_light(const struct cred *override_cred) +static inline const struct cred *override_creds(const struct cred *override_cred) { const struct cred *old = current->cred; @@ -185,35 +178,12 @@ static inline const struct cred *override_creds_light(const struct cred *overrid return old; } -static inline void revert_creds_light(const struct cred *revert_cred) -{ - rcu_assign_pointer(current->cred, revert_cred); -} - -/** - * get_new_cred_many - Get references on a new set of credentials - * @cred: The new credentials to reference - * @nr: Number of references to acquire - * - * Get references on the specified set of new credentials. The caller must - * release all acquired references. - */ -static inline struct cred *get_new_cred_many(struct cred *cred, int nr) +static inline const struct cred *revert_creds(const struct cred *revert_cred) { - atomic_long_add(nr, &cred->usage); - return cred; -} + const struct cred *override_cred = current->cred; -/** - * get_new_cred - Get a reference on a new set of credentials - * @cred: The new credentials to reference - * - * Get a reference on the specified set of new credentials. 
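The reworked override_creds()/revert_creds() pair above no longer manipulates reference counts, so the usual pattern becomes a pure pointer swap (acquisition and release of new_cred are elided):

	const struct cred *old = override_creds(new_cred);

	/* ... act with new_cred as current->cred ... */
	revert_creds(old);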
The caller must
- * release the reference.
- */
-static inline struct cred *get_new_cred(struct cred *cred)
-{
-	return get_new_cred_many(cred, 1);
+	rcu_assign_pointer(current->cred, revert_cred);
+	return override_cred;
 }
 
 /**
@@ -236,7 +206,8 @@ static inline const struct cred *get_cred_many(const struct cred *cred, int nr)
 	if (!cred)
 		return cred;
 	nonconst_cred->non_rcu = 0;
-	return get_new_cred_many(nonconst_cred, nr);
+	atomic_long_add(nr, &nonconst_cred->usage);
+	return cred;
 }
 
 /*
diff --git a/include/linux/damon.h b/include/linux/damon.h
index a67f2c4940e9..af525252b853 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -193,11 +193,16 @@ struct damos_quota_goal {
 * size quota is set, DAMON tries to apply the action only up to &sz bytes
 * within &reset_interval.
 *
- * Internally, the time quota is transformed to a size quota using estimated
- * throughput of the scheme's action. DAMON then compares it against &sz and
- * uses smaller one as the effective quota.
+ * To reconcile the different types of quotas and goals, DAMON internally
+ * converts them into a single size quota called the "effective quota", which
+ * is used internally as the only real quota. The conversion is made as
+ * follows.
 *
- * If @goals is not empt, DAMON calculates yet another size quota based on the
+ * The time quota is transformed to a size quota using estimated throughput of
+ * the scheme's action. DAMON then compares it against &sz and uses the
+ * smaller one as the effective quota.
+ *
+ * If @goals is not empty, DAMON calculates yet another size quota based on the
 * goals using its internal feedback loop algorithm, for every @reset_interval.
 * Then, if the new size quota is smaller than the effective quota, it uses the
 * new size quota as the effective quota.
@@ -286,13 +291,33 @@ struct damos_watermarks {
 * @sz_tried: Total size of regions that the scheme is tried to be applied.
 * @nr_applied: Total number of regions that the scheme is applied.
 * @sz_applied: Total size of regions that the scheme is applied.
+ * @sz_ops_filter_passed:
+ * Total bytes that passed ops layer-handled DAMOS filters.
 * @qt_exceeds: Total number of times the quota of the scheme has exceeded.
+ *
+ * "Tried an action to a region" in this context means the DAMOS core logic
+ * determined the region to be eligible for the action. The access pattern
+ * (&struct damos_access_pattern), quotas (&struct damos_quota), watermarks
+ * (&struct damos_watermarks) and filters (&struct damos_filter) that are
+ * handled by the core logic can affect this. The core logic asks the
+ * operation set (&struct damon_operations) to apply the action to the region.
+ *
+ * "Applied an action to a region" in this context means the operation set
+ * (&struct damon_operations) successfully applied the action to the region, at
+ * least to a part of the region. The filters (&struct damos_filter) that are
+ * handled on the operation set layer, the type of the action, and the pages of
+ * the region can affect this. For example, if a filter is set to exclude
+ * anonymous pages and the region has only anonymous pages, applying the action
+ * to the region will fail. Likewise, if the action is &DAMOS_PAGEOUT and all
+ * pages of the region are already paged out, applying the action will fail.
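An aside on the quota conversion described in the damon.h hunk above: it reduces to turning the time quota into bytes via an estimated throughput, then taking the minimum of all the derived size quotas. A standalone sketch of the arithmetic; the function name and throughput figure are illustrative, not DAMON internals:

#include <stdio.h>

/*
 * Convert a time quota to a size quota using an estimated throughput,
 * then take the minimum of all set size quotas as the effective quota.
 */
static unsigned long effective_quota(unsigned long sz_quota,
				     unsigned long time_quota_ms,
				     unsigned long est_bytes_per_ms,
				     unsigned long goal_sz_quota)
{
	unsigned long esz = time_quota_ms * est_bytes_per_ms;

	if (sz_quota && sz_quota < esz)
		esz = sz_quota;
	if (goal_sz_quota && goal_sz_quota < esz)
		esz = goal_sz_quota;
	return esz;
}

int main(void)
{
	/* 100 ms quota at ~4 MiB/ms, a 256 MiB size quota, a 128 MiB goal. */
	printf("%lu MiB\n",
	       effective_quota(256UL << 20, 100, 4UL << 20, 128UL << 20) >> 20);
	return 0;	/* prints "128 MiB": the smallest derived quota wins */
}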
*/
 struct damos_stat {
 	unsigned long nr_tried;
 	unsigned long sz_tried;
 	unsigned long nr_applied;
 	unsigned long sz_applied;
+	unsigned long sz_ops_filter_passed;
 	unsigned long qt_exceeds;
 };
 
@@ -327,8 +352,9 @@ enum damos_filter_type {
 
 /**
 * struct damos_filter - DAMOS action target memory filter.
- * @type:	Type of the page.
- * @matching:	If the matching page should filtered out or in.
+ * @type:	Type of the target memory.
+ * @matching:	Whether this is for @type-matching memory.
+ * @allow:	Whether to include or exclude the @matching memory.
 * @memcg_id:	Memcg id of the question if @type is DAMOS_FILTER_MEMCG.
 * @addr_range:	Address range if @type is DAMOS_FILTER_TYPE_ADDR.
 * @target_idx:	Index of the &struct damon_target of
@@ -337,13 +363,15 @@ enum damos_filter_type {
 * @list:	List head for siblings.
 *
 * Before applying the &damos->action to a memory region, DAMOS checks if each
- * page of the region matches to this and avoid applying the action if so.
- * Support of each filter type depends on the running &struct damon_operations
- * and the type. Refer to &enum damos_filter_type for more detai.
+ * byte of the region matches the given condition and avoids applying the
+ * action if so. Support of each filter type depends on the running &struct
+ * damon_operations and the type. Refer to &enum damos_filter_type for more
+ * details.
 */
 struct damos_filter {
 	enum damos_filter_type type;
 	bool matching;
+	bool allow;
 	union {
 		unsigned short memcg_id;
 		struct damon_addr_range addr_range;
@@ -352,6 +380,31 @@ struct damos_filter {
 	struct list_head list;
 };
 
+struct damon_ctx;
+struct damos;
+
+/**
+ * struct damos_walk_control - Control damos_walk().
+ *
+ * @walk_fn:	Function to be called back for each region.
+ * @data:	Data that will be passed to walk functions.
+ *
+ * Control damos_walk(), which requests a specific kdamond to invoke the given
+ * function for each region that is eligible for actions of the kdamond's
+ * schemes. Refer to damos_walk() for more details.
+ */
+struct damos_walk_control {
+	void (*walk_fn)(void *data, struct damon_ctx *ctx,
+			struct damon_target *t, struct damon_region *r,
+			struct damos *s, unsigned long sz_filter_passed);
+	void *data;
+/* private: internal use only */
+	/* informs if the kdamond finished handling of the walk request */
+	struct completion completion;
+	/* informs if the walk is canceled. */
+	bool canceled;
+};
+
 /**
 * struct damos_access_pattern - Target access pattern of the given scheme.
 * @min_sz_region:	Minimum size of target regions.
@@ -415,6 +468,8 @@ struct damos {
 	 * @action
 	 */
 	unsigned long next_apply_sis;
+	/* informs if ongoing DAMOS walk for this scheme is finished */
+	bool walk_completed;
 /* public: */
 	struct damos_quota quota;
 	struct damos_watermarks wmarks;
@@ -442,8 +497,6 @@ enum damon_ops_id {
 	NR_DAMON_OPS,
 };
 
-struct damon_ctx;
-
 /**
 * struct damon_operations - Monitoring operations for given use cases.
 *
@@ -487,7 +540,8 @@ struct damon_ctx;
 * @apply_scheme is called from @kdamond when a region for user provided
 * DAMON-based operation scheme is found. It should apply the scheme's action
 * to the region and return bytes of the region that the action is successfully
- * applied.
+ * applied. It should also report how many bytes of the region passed the
+ * filters (&struct damos_filter) that it handles itself.
 * @target_valid should check whether the target is still valid for the
 * monitoring.
 * @cleanup is called from @kdamond just before its termination.
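To make the @matching/@allow pair of struct damos_filter concrete, here is a small truth-table demo. The evaluation rule shown (a filter decides only for memory whose match result equals @matching, includes or excludes it per @allow, and otherwise falls through to the next filter) reflects the documentation above; the helper names are illustrative:

#include <stdbool.h>
#include <stdio.h>

enum verdict { PASS, BLOCK, NEXT_FILTER };

/* One filter step for memory that does/doesn't match the filter's @type. */
static enum verdict apply_filter(bool mem_matches_type, bool matching,
				 bool allow)
{
	if (mem_matches_type != matching)
		return NEXT_FILTER;	/* this filter does not decide */
	return allow ? PASS : BLOCK;
}

int main(void)
{
	static const char *names[] = { "PASS", "BLOCK", "NEXT_FILTER" };

	for (int m = 0; m < 2; m++)
		for (int a = 0; a < 2; a++)
			printf("mem_matches=1 matching=%d allow=%d -> %s\n",
			       m, a, names[apply_filter(true, m, a)]);
	return 0;
}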
@@ -504,7 +558,7 @@ struct damon_operations {
 			     struct damos *scheme);
 	unsigned long (*apply_scheme)(struct damon_ctx *context,
 			struct damon_target *t, struct damon_region *r,
-			struct damos *scheme);
+			struct damos *scheme, unsigned long *sz_filter_passed);
 	bool (*target_valid)(struct damon_target *t);
 	void (*cleanup)(struct damon_ctx *context);
 };
@@ -552,6 +606,27 @@ struct damon_callback {
 	void (*before_terminate)(struct damon_ctx *context);
 };
 
+/*
+ * struct damon_call_control - Control damon_call().
+ *
+ * @fn:			Function to be called back.
+ * @data:		Data that will be passed to @fn.
+ * @return_code:	Return code from @fn invocation.
+ *
+ * Control damon_call(), which requests a specific kdamond to invoke a given
+ * function. Refer to damon_call() for more details.
+ */
+struct damon_call_control {
+	int (*fn)(void *data);
+	void *data;
+	int return_code;
+/* private: internal use only */
+	/* informs if the kdamond finished handling of the request */
+	struct completion completion;
+	/* informs if the kdamond canceled @fn invocation */
+	bool canceled;
+};
+
 /**
 * struct damon_attrs - Monitoring attributes for accuracy/overhead control.
 *
@@ -632,6 +707,12 @@ struct damon_ctx {
 	/* for scheme quotas prioritization */
 	unsigned long *regions_score_histogram;
 
+	struct damon_call_control *call_control;
+	struct mutex call_control_lock;
+
+	struct damos_walk_control *walk_control;
+	struct mutex walk_control_lock;
+
/* public: */
 	struct task_struct *kdamond;
 	struct mutex kdamond_lock;
@@ -725,7 +806,7 @@ void damon_update_region_access_rate(struct damon_region *r, bool accessed,
 		struct damon_attrs *attrs);
 
 struct damos_filter *damos_new_filter(enum damos_filter_type type,
-		bool matching);
+		bool matching, bool allow);
 void damos_add_filter(struct damos *s, struct damos_filter *f);
 void damos_destroy_filter(struct damos_filter *f);
 
@@ -779,6 +860,9 @@ static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs
 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
 
+int damon_call(struct damon_ctx *ctx, struct damon_call_control *control);
+int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control);
+
 int damon_set_region_biggest_system_ram_default(struct damon_target *t,
 				unsigned long *start, unsigned long *end);
 
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 6639f48dac36..800dcc360db2 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -29,25 +29,39 @@ struct task_delay_info {
 	 * XXX_delay contains the accumulated delay time in nanoseconds.
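A hedged sketch of how the damon_call() entry point declared in the damon.h hunk above might be used: fill a struct damon_call_control with a callback and its data, submit it against a running context, then read @return_code. This assumes, as the completion member suggests, that damon_call() waits for the kdamond to run @fn; the callback body and error handling are placeholders:

/* Sketch: ask the kdamond of @ctx to run a function in its own context. */
static int count_invocations_cb(void *data)
{
	unsigned long *nr = data;

	/* ... inspect kdamond-private state safely from kdamond context ... */
	(*nr)++;
	return 0;
}

static int run_in_kdamond(struct damon_ctx *ctx)
{
	unsigned long nr = 0;
	struct damon_call_control control = {
		.fn = count_invocations_cb,
		.data = &nr,
		/* private members are set up by damon_call() itself */
	};
	int err = damon_call(ctx, &control);

	return err ? err : control.return_code;
}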
*/ u64 blkio_start; + u64 blkio_delay_max; + u64 blkio_delay_min; u64 blkio_delay; /* wait for sync block io completion */ u64 swapin_start; + u64 swapin_delay_max; + u64 swapin_delay_min; u64 swapin_delay; /* wait for swapin */ u32 blkio_count; /* total count of the number of sync block */ /* io operations performed */ u32 swapin_count; /* total count of swapin */ u64 freepages_start; + u64 freepages_delay_max; + u64 freepages_delay_min; u64 freepages_delay; /* wait for memory reclaim */ u64 thrashing_start; + u64 thrashing_delay_max; + u64 thrashing_delay_min; u64 thrashing_delay; /* wait for thrashing page */ u64 compact_start; + u64 compact_delay_max; + u64 compact_delay_min; u64 compact_delay; /* wait for memory compact */ u64 wpcopy_start; + u64 wpcopy_delay_max; + u64 wpcopy_delay_min; u64 wpcopy_delay; /* wait for write-protect copy */ + u64 irq_delay_max; + u64 irq_delay_min; u64 irq_delay; /* wait for IRQ/SOFTIRQ */ u32 freepages_count; /* total count of memory reclaim */ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 8321f65897f3..bcc6d7b69470 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -299,6 +299,9 @@ struct target_type { #define dm_target_supports_mixed_zoned_model(type) (false) #endif +#define DM_TARGET_ATOMIC_WRITES 0x00000400 +#define dm_target_supports_atomic_writes(type) ((type)->features & DM_TARGET_ATOMIC_WRITES) + struct dm_target { struct dm_table *table; struct target_type *type; diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h index cdc4757217f9..b18658bce2c3 100644 --- a/include/linux/device/bus.h +++ b/include/linux/device/bus.h @@ -48,6 +48,7 @@ struct fwnode_handle; * will never get called until they do. * @remove: Called when a device removed from this bus. * @shutdown: Called at shut-down time to quiesce the device. + * @irq_get_affinity: Get IRQ affinity mask for the device on this bus. * * @online: Called to put the device back online (after offlining it). * @offline: Called to put the device offline for hot-removal. May fail. 
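The *_delay_max/*_delay_min fields added to struct task_delay_info in the delayacct.h hunk above extend each accumulated delay with running extremes. A standalone sketch of the bookkeeping such an update needs (illustrative, not the kernel's delayacct code; min uses 0 as "unset"):

#include <stdint.h>
#include <stdio.h>

struct delay_stat {
	uint64_t total;	/* accumulated delay, ns */
	uint64_t max;	/* largest single delay, ns */
	uint64_t min;	/* smallest single delay, ns; 0 = unset */
	uint32_t count;
};

static void delay_account(struct delay_stat *s, uint64_t delay_ns)
{
	s->total += delay_ns;
	s->count++;
	if (delay_ns > s->max)
		s->max = delay_ns;
	if (s->min == 0 || delay_ns < s->min)
		s->min = delay_ns;
}

int main(void)
{
	struct delay_stat s = { 0 };
	uint64_t samples[] = { 1200, 300, 9000 };

	for (int i = 0; i < 3; i++)
		delay_account(&s, samples[i]);
	printf("total=%llu max=%llu min=%llu count=%u\n",
	       (unsigned long long)s.total, (unsigned long long)s.max,
	       (unsigned long long)s.min, s.count);
	return 0;
}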
@@ -87,6 +88,8 @@ struct bus_type { void (*sync_state)(struct device *dev); void (*remove)(struct device *dev); void (*shutdown)(struct device *dev); + const struct cpumask *(*irq_get_affinity)(struct device *dev, + unsigned int irq_vec); int (*online)(struct device *dev); int (*offline)(struct device *dev); diff --git a/include/linux/efi.h b/include/linux/efi.h index e5815867aba9..053c57e61869 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -363,7 +363,6 @@ void efi_native_runtime_setup(void); #define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81) #define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94) -#define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2) #define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c) #define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93) #define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0) @@ -373,7 +372,6 @@ void efi_native_runtime_setup(void); #define EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID EFI_GUID(0x8b843e20, 0x8132, 0x4852, 0x90, 0xcc, 0x55, 0x1a, 0x4e, 0x4a, 0x7f, 0x1c) #define EFI_DEVICE_PATH_FROM_TEXT_PROTOCOL_GUID EFI_GUID(0x05c99a21, 0xc70f, 0x4ad2, 0x8a, 0x5f, 0x35, 0xdf, 0x33, 0x43, 0xf5, 0x1e) #define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a) -#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39) #define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a) #define EFI_FILE_INFO_ID EFI_GUID(0x09576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) #define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80) @@ -1286,8 +1284,6 @@ struct linux_efi_memreserve { void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size); -char *efi_systab_show_arch(char *str); - /* * The LINUX_EFI_MOK_VARIABLE_TABLE_GUID config table can be provided * to the kernel by an EFI boot loader. 
The table contains a packed diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 752e0b297582..78318d49276d 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -179,6 +179,7 @@ int em_dev_compute_costs(struct device *dev, struct em_perf_state *table, int em_dev_update_chip_binning(struct device *dev); int em_update_performance_limits(struct em_perf_domain *pd, unsigned long freq_min_khz, unsigned long freq_max_khz); +void em_rebuild_sched_domains(void); /** * em_pd_get_efficient_state() - Get an efficient performance state from the EM @@ -404,6 +405,7 @@ int em_update_performance_limits(struct em_perf_domain *pd, { return -EINVAL; } +static inline void em_rebuild_sched_domains(void) {} #endif #endif diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index ecf203f01034..9a1eacf35d37 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -81,7 +81,7 @@ static const u8 eth_ipv6_mcast_addr_base[ETH_ALEN] __aligned(2) = * is_link_local_ether_addr - Determine if given Ethernet address is link-local * @addr: Pointer to a six-byte array containing the Ethernet address * - * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per + * Return: true if address is link local reserved addr (01:80:c2:00:00:0X) per * IEEE 802.1Q 8.6.3 Frame filtering. * * Please note: addr must be aligned to u16. @@ -104,7 +104,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr) * is_zero_ether_addr - Determine if give Ethernet address is all zeros. * @addr: Pointer to a six-byte array containing the Ethernet address * - * Return true if the address is all zeroes. + * Return: true if the address is all zeroes. * * Please note: addr must be aligned to u16. */ @@ -123,7 +123,7 @@ static inline bool is_zero_ether_addr(const u8 *addr) * is_multicast_ether_addr - Determine if the Ethernet address is a multicast. * @addr: Pointer to a six-byte array containing the Ethernet address * - * Return true if the address is a multicast address. + * Return: true if the address is a multicast address. * By definition the broadcast address is also a multicast address. */ static inline bool is_multicast_ether_addr(const u8 *addr) @@ -157,7 +157,7 @@ static inline bool is_multicast_ether_addr_64bits(const u8 *addr) * is_local_ether_addr - Determine if the Ethernet address is locally-assigned one (IEEE 802). * @addr: Pointer to a six-byte array containing the Ethernet address * - * Return true if the address is a local address. + * Return: true if the address is a local address. */ static inline bool is_local_ether_addr(const u8 *addr) { @@ -168,7 +168,7 @@ static inline bool is_local_ether_addr(const u8 *addr) * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast * @addr: Pointer to a six-byte array containing the Ethernet address * - * Return true if the address is the broadcast address. + * Return: true if the address is the broadcast address. * * Please note: addr must be aligned to u16. */ @@ -183,7 +183,7 @@ static inline bool is_broadcast_ether_addr(const u8 *addr) * is_unicast_ether_addr - Determine if the Ethernet address is unicast * @addr: Pointer to a six-byte array containing the Ethernet address * - * Return true if the address is a unicast address. + * Return: true if the address is a unicast address. 
*/
 static inline bool is_unicast_ether_addr(const u8 *addr)
 {
@@ -197,7 +197,7 @@ static inline bool is_unicast_ether_addr(const u8 *addr)
 * Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not
 * a multicast address, and is not FF:FF:FF:FF:FF:FF.
 *
- * Return true if the address is valid.
+ * Return: true if the address is valid.
 *
 * Please note: addr must be aligned to u16.
 */
@@ -214,7 +214,7 @@ static inline bool is_valid_ether_addr(const u8 *addr)
 *
 * Check that the value from the Ethertype/length field is a valid Ethertype.
 *
- * Return true if the valid is an 802.3 supported Ethertype.
+ * Return: true if the value is an 802.3 supported Ethertype.
 */
 static inline bool eth_proto_is_802_3(__be16 proto)
 {
@@ -458,7 +458,7 @@ static inline bool ether_addr_is_ip_mcast(const u8 *addr)
 * ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
- * Return a u64 value of the address
+ * Return: a u64 value of the address
 */
 static inline u64 ether_addr_to_u64(const u8 *addr)
 {
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index b8b935b52603..870994cc3ef7 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -78,6 +78,9 @@ enum {
 * @cqe_size: Size of TX/RX completion queue event
 * @tx_push_buf_len: Size of TX push buffer
 * @tx_push_buf_max_len: Maximum allowed size of TX push buffer
+ * @hds_thresh: Packet size threshold for header data split (HDS)
+ * @hds_thresh_max: Maximum supported setting for @hds_thresh
+ *
 */
 struct kernel_ethtool_ringparam {
 	u32 rx_buf_len;
@@ -87,6 +90,8 @@ struct kernel_ethtool_ringparam {
 	u32 cqe_size;
 	u32 tx_push_buf_len;
 	u32 tx_push_buf_max_len;
+	u32 hds_thresh;
+	u32 hds_thresh_max;
 };
 
 /**
@@ -97,6 +102,7 @@ struct kernel_ethtool_ringparam {
 * @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push
 * @ETHTOOL_RING_USE_TX_PUSH_BUF_LEN: capture for setting tx_push_buf_len
 * @ETHTOOL_RING_USE_TCP_DATA_SPLIT: capture for setting tcp_data_split
+ * @ETHTOOL_RING_USE_HDS_THRS: capture for setting header-data-split-thresh
 */
 enum ethtool_supported_ring_param {
 	ETHTOOL_RING_USE_RX_BUF_LEN		= BIT(0),
@@ -105,6 +111,7 @@ enum ethtool_supported_ring_param {
 	ETHTOOL_RING_USE_RX_PUSH		= BIT(3),
 	ETHTOOL_RING_USE_TX_PUSH_BUF_LEN	= BIT(4),
 	ETHTOOL_RING_USE_TCP_DATA_SPLIT		= BIT(5),
+	ETHTOOL_RING_USE_HDS_THRS		= BIT(6),
 };
 
 #define __ETH_RSS_HASH_BIT(bit)	((u32)1 << (bit))
@@ -257,7 +264,7 @@ struct ethtool_link_ksettings {
 * @mode : one of the ETHTOOL_LINK_MODE_*_BIT
 * (not atomic, no bound checking)
 *
- * Returns true/false.
+ * Returns: true/false.
 */
 #define ethtool_link_ksettings_test_link_mode(ptr, name, mode) \
 	test_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
@@ -412,6 +419,29 @@ struct ethtool_eth_phy_stats {
 	);
 };
 
+/**
+ * struct ethtool_phy_stats - PHY-level statistics counters
+ * @rx_packets: Total successfully received frames
+ * @rx_bytes: Total successfully received bytes
+ * @rx_errors: Total received frames with errors (e.g., CRC errors)
+ * @tx_packets: Total successfully transmitted frames
+ * @tx_bytes: Total successfully transmitted bytes
+ * @tx_errors: Total transmitted frames with errors
+ *
+ * This structure provides a standardized interface for reporting
+ * PHY-level statistics counters. It is designed to expose statistics
+ * commonly provided by PHYs but not explicitly defined in the IEEE
+ * 802.3 standard.
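For the new @hds_thresh/@hds_thresh_max pair above, a driver advertising ETHTOOL_RING_USE_HDS_THRS is expected to reject requested thresholds above its maximum. A hedged kernel-style sketch of the validation in a ->set_ringparam() handler; the driver around it is hypothetical:

/* Sketch: validate a requested HDS threshold against the device limit. */
static int sketch_set_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ring,
				struct kernel_ethtool_ringparam *kernel_ring,
				struct netlink_ext_ack *extack)
{
	if (kernel_ring->hds_thresh > kernel_ring->hds_thresh_max) {
		NL_SET_ERR_MSG(extack, "hds_thresh exceeds device maximum");
		return -EINVAL;
	}
	/* ... program the threshold into the RX queues ... */
	return 0;
}

The driver would also set ETHTOOL_RING_USE_HDS_THRS in its ethtool_ops->supported_ring_params so the core accepts the parameter at all.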
+ */
+struct ethtool_phy_stats {
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_errors;
+	u64 tx_packets;
+	u64 tx_bytes;
+	u64 tx_errors;
+};
+
 /* Basic IEEE 802.3 MAC Ctrl statistics (30.3.3.*), not otherwise exposed
 * via a more targeted API.
 */
@@ -529,6 +559,12 @@ struct ethtool_rmon_stats {
 /**
 * struct ethtool_ts_stats - HW timestamping statistics
 * @pkts: Number of packets successfully timestamped by the hardware.
+ * @onestep_pkts_unconfirmed: Number of PTP packets with one-step TX
+ *			      timestamping that were sent, but for which the
+ *			      device offers no confirmation whether they made
+ *			      it onto the wire and the timestamp was inserted
+ *			      in the originTimestamp or correctionField, or
+ *			      not.
 * @lost: Number of hardware timestamping requests where the timestamping
 *        information from the hardware never arrived for submission with
 *        the skb.
@@ -541,6 +577,7 @@ struct ethtool_rmon_stats {
 struct ethtool_ts_stats {
 	struct_group(tx_stats,
 		u64	pkts;
+		u64	onestep_pkts_unconfirmed;
 		u64	lost;
 		u64	err;
 	);
@@ -711,6 +748,7 @@ struct ethtool_rxfh_param {
 * @cmd: command number = %ETHTOOL_GET_TS_INFO
 * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
 * @phc_index: device index of the associated PHC, or -1 if there is none
+ * @phc_qualifier: qualifier of the associated PHC
 * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
 * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
 */
@@ -718,6 +756,7 @@ struct kernel_ethtool_ts_info {
 	u32 cmd;
 	u32 so_timestamping;
 	int phc_index;
+	enum hwtstamp_provider_qualifier phc_qualifier;
 	enum hwtstamp_tx_types tx_types;
 	enum hwtstamp_rx_filters rx_filters;
 };
@@ -749,6 +788,7 @@ struct kernel_ethtool_ts_info {
 *	@rss_context argument to @create_rxfh_context and friends.
 * @supported_coalesce_params: supported types of interrupt coalescing.
 * @supported_ring_params: supported ring params.
+ * @supported_hwtstamp_qualifiers: bitfield of supported hwtstamp qualifiers.
 * @get_drvinfo: Report driver/device information. Modern drivers no
 *	longer have to implement this callback. Most fields are
 *	correctly filled in by the core using system information, or
@@ -966,6 +1006,7 @@ struct ethtool_ops {
 	u32	rxfh_max_num_contexts;
 	u32	supported_coalesce_params;
 	u32	supported_ring_params;
+	u32	supported_hwtstamp_qualifiers;
 	void	(*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
 	int	(*get_regs_len)(struct net_device *);
 	void	(*get_regs)(struct net_device *, struct ethtool_regs *, void *);
@@ -1199,7 +1240,7 @@ ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
 * @dev: pointer to net_device structure
 * @vclock_index: pointer to pointer of vclock index
 *
- * Return number of phc vclocks
+ * Return: number of phc vclocks
 */
 int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
 
@@ -1253,7 +1294,7 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
 * ethtool_get_ts_info_by_layer - Obtains time stamping capabilities from the MAC or PHY layer.
 * @dev: pointer to net_device structure
 * @info: buffer to hold the result
- * Returns zero on success, non-zero otherwise.
+ * Returns: zero on success, non-zero otherwise.
 */
 int ethtool_get_ts_info_by_layer(struct net_device *dev,
				 struct kernel_ethtool_ts_info *info);
@@ -1299,24 +1340,4 @@ struct ethtool_forced_speed_map {
 
 void ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps,
				    u32 size);
-
-/* C33 PSE extended state and substate.
*/ -struct ethtool_c33_pse_ext_state_info { - enum ethtool_c33_pse_ext_state c33_pse_ext_state; - union { - enum ethtool_c33_pse_ext_substate_error_condition error_condition; - enum ethtool_c33_pse_ext_substate_mr_pse_enable mr_pse_enable; - enum ethtool_c33_pse_ext_substate_option_detect_ted option_detect_ted; - enum ethtool_c33_pse_ext_substate_option_vport_lim option_vport_lim; - enum ethtool_c33_pse_ext_substate_ovld_detected ovld_detected; - enum ethtool_c33_pse_ext_substate_power_not_available power_not_available; - enum ethtool_c33_pse_ext_substate_short_detected short_detected; - u32 __c33_pse_ext_substate; - }; -}; - -struct ethtool_c33_pse_pw_limit_range { - u32 min; - u32 max; -}; #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 4cc8801e50e3..a087606ace19 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -3,6 +3,7 @@ #define LINUX_EXPORTFS_H 1 #include <linux/types.h> +#include <linux/path.h> struct dentry; struct iattr; @@ -156,6 +157,17 @@ struct fid { }; }; +enum handle_to_path_flags { + HANDLE_CHECK_PERMS = (1 << 0), + HANDLE_CHECK_SUBTREE = (1 << 1), +}; + +struct handle_to_path_ctx { + struct path root; + enum handle_to_path_flags flags; + unsigned int fh_flags; +}; + #define EXPORT_FH_CONNECTABLE 0x1 /* Encode file handle with parent */ #define EXPORT_FH_FID 0x2 /* File handle may be non-decodeable */ #define EXPORT_FH_DIR_ONLY 0x4 /* Only decode file handle for a directory */ @@ -225,6 +237,12 @@ struct fid { * is also a directory. In the event that it cannot be found, or storage * space cannot be allocated, a %ERR_PTR should be returned. * + * permission: + * Allow filesystems to specify a custom permission function. + * + * open: + * Allow filesystems to specify a custom open function. + * * commit_metadata: * @commit_metadata should commit metadata changes to stable storage. 
* @@ -251,6 +269,8 @@ struct export_operations { bool write, u32 *device_generation); int (*commit_blocks)(struct inode *inode, struct iomap *iomaps, int nr_iomaps, struct iattr *iattr); + int (*permission)(struct handle_to_path_ctx *ctx, unsigned int oflags); + struct file * (*open)(struct path *path, unsigned int oflags); #define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */ #define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */ #define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */ diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index 89ff45bd6f01..78f660ebc318 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -89,6 +89,16 @@ #define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \ FAN_RENAME) +/* Content events can be used to inspect file content */ +#define FANOTIFY_CONTENT_PERM_EVENTS (FAN_OPEN_PERM | FAN_OPEN_EXEC_PERM | \ + FAN_ACCESS_PERM) +/* Pre-content events can be used to fill file content */ +#define FANOTIFY_PRE_CONTENT_EVENTS (FAN_PRE_ACCESS) + +/* Events that require a permission response from user */ +#define FANOTIFY_PERM_EVENTS (FANOTIFY_CONTENT_PERM_EVENTS | \ + FANOTIFY_PRE_CONTENT_EVENTS) + /* Events that can be reported with event->fd */ #define FANOTIFY_FD_EVENTS (FANOTIFY_PATH_EVENTS | FANOTIFY_PERM_EVENTS) @@ -104,10 +114,6 @@ FANOTIFY_INODE_EVENTS | \ FANOTIFY_ERROR_EVENTS) -/* Events that require a permission response from user */ -#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \ - FAN_OPEN_EXEC_PERM) - /* Extra flags that may be reported with event or control handling of events */ #define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR) @@ -126,7 +132,9 @@ /* These masks check for invalid bits in permission responses. */ #define FANOTIFY_RESPONSE_ACCESS (FAN_ALLOW | FAN_DENY) #define FANOTIFY_RESPONSE_FLAGS (FAN_AUDIT | FAN_INFO) -#define FANOTIFY_RESPONSE_VALID_MASK (FANOTIFY_RESPONSE_ACCESS | FANOTIFY_RESPONSE_FLAGS) +#define FANOTIFY_RESPONSE_VALID_MASK \ + (FANOTIFY_RESPONSE_ACCESS | FANOTIFY_RESPONSE_FLAGS | \ + (FAN_ERRNO_MASK << FAN_ERRNO_SHIFT)) /* Do not use these old uapi constants internally */ #undef FAN_ALL_CLASS_BITS diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h index c50882f19235..966092ffa89a 100644 --- a/include/linux/fiemap.h +++ b/include/linux/fiemap.h @@ -5,12 +5,18 @@ #include <uapi/linux/fiemap.h> #include <linux/fs.h> +/** + * struct fiemap_extent_info - fiemap request to a filesystem + * @fi_flags: Flags as passed from user + * @fi_extents_mapped: Number of mapped extents + * @fi_extents_max: Size of fiemap_extent array + * @fi_extents_start: Start of fiemap_extent array + */ struct fiemap_extent_info { - unsigned int fi_flags; /* Flags as passed from user */ - unsigned int fi_extents_mapped; /* Number of mapped extents */ - unsigned int fi_extents_max; /* Size of fiemap_extent array */ - struct fiemap_extent __user *fi_extents_start; /* Start of - fiemap_extent array */ + unsigned int fi_flags; + unsigned int fi_extents_mapped; + unsigned int fi_extents_max; + struct fiemap_extent __user *fi_extents_start; }; int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo, diff --git a/include/linux/filter.h b/include/linux/filter.h index 0477254bc2d3..a3ea46281595 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1179,17 +1179,18 @@ static inline int xdp_ok_fwd_dev(const struct net_device *fwd, * This does not appear to be a real limitation for existing software. 
*/
 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
-			    struct xdp_buff *xdp, struct bpf_prog *prog);
+			    struct xdp_buff *xdp, const struct bpf_prog *prog);
 int xdp_do_redirect(struct net_device *dev,
		    struct xdp_buff *xdp,
-		    struct bpf_prog *prog);
+		    const struct bpf_prog *prog);
 int xdp_do_redirect_frame(struct net_device *dev,
			  struct xdp_buff *xdp,
			  struct xdp_frame *xdpf,
-			  struct bpf_prog *prog);
+			  const struct bpf_prog *prog);
 void xdp_do_flush(void);
 
-void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);
+void bpf_warn_invalid_xdp_action(const struct net_device *dev,
				 const struct bpf_prog *prog, u32 act);
 
 #ifdef CONFIG_INET
 struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
diff --git a/include/linux/firmware/cirrus/cs_dsp_test_utils.h b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
new file mode 100644
index 000000000000..4f87a908ab4f
--- /dev/null
+++ b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Support utilities for cs_dsp testing.
+ *
+ * Copyright (C) 2024 Cirrus Logic, Inc. and
+ *                    Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/regmap.h>
+#include <linux/firmware/cirrus/wmfw.h>
+
+struct kunit;
+struct cs_dsp_test;
+struct cs_dsp_test_local;
+
+/**
+ * struct cs_dsp_test - base class for test utilities
+ *
+ * @test:	Pointer to struct kunit instance.
+ * @dsp:	Pointer to struct cs_dsp instance.
+ * @local:	Private data for each test suite.
+ */
+struct cs_dsp_test {
+	struct kunit *test;
+	struct cs_dsp *dsp;
+
+	struct cs_dsp_test_local *local;
+
+	/* Following members are private */
+	bool saw_bus_write;
+};
+
+/**
+ * struct cs_dsp_mock_alg_def - Info for creating a mock algorithm entry.
+ *
+ * @id:			Algorithm ID.
+ * @ver:		Algorithm version.
+ * @xm_base_words:	XM base address in DSP words.
+ * @xm_size_words:	XM size in DSP words.
+ * @ym_base_words:	YM base address in DSP words.
+ * @ym_size_words:	YM size in DSP words.
+ * @zm_base_words:	ZM base address in DSP words.
+ * @zm_size_words:	ZM size in DSP words.
+ */
+struct cs_dsp_mock_alg_def {
+	unsigned int id;
+	unsigned int ver;
+	unsigned int xm_base_words;
+	unsigned int xm_size_words;
+	unsigned int ym_base_words;
+	unsigned int ym_size_words;
+	unsigned int zm_base_words;
+	unsigned int zm_size_words;
+};
+
+struct cs_dsp_mock_coeff_def {
+	const char *shortname;
+	const char *fullname;
+	const char *description;
+	u16 type;
+	u16 flags;
+	u16 mem_type;
+	unsigned int offset_dsp_words;
+	unsigned int length_bytes;
+};
+
+/**
+ * struct cs_dsp_mock_xm_header - XM header builder
+ *
+ * @test_priv:		Pointer to the struct cs_dsp_test.
+ * @blob_data:		Pointer to the created blob data.
+ * @blob_size_bytes:	Size of the data at blob_data.
+ */ +struct cs_dsp_mock_xm_header { + struct cs_dsp_test *test_priv; + void *blob_data; + size_t blob_size_bytes; +}; + +struct cs_dsp_mock_wmfw_builder; +struct cs_dsp_mock_bin_builder; + +extern const unsigned int cs_dsp_mock_adsp2_32bit_sysbase; +extern const unsigned int cs_dsp_mock_adsp2_16bit_sysbase; +extern const unsigned int cs_dsp_mock_halo_core_base; +extern const unsigned int cs_dsp_mock_halo_sysinfo_base; + +extern const struct cs_dsp_region cs_dsp_mock_halo_dsp1_regions[]; +extern const unsigned int cs_dsp_mock_halo_dsp1_region_sizes[]; +extern const struct cs_dsp_region cs_dsp_mock_adsp2_32bit_dsp1_regions[]; +extern const unsigned int cs_dsp_mock_adsp2_32bit_dsp1_region_sizes[]; +extern const struct cs_dsp_region cs_dsp_mock_adsp2_16bit_dsp1_regions[]; +extern const unsigned int cs_dsp_mock_adsp2_16bit_dsp1_region_sizes[]; +int cs_dsp_mock_count_regions(const unsigned int *region_sizes); +unsigned int cs_dsp_mock_size_of_region(const struct cs_dsp *dsp, int mem_type); +unsigned int cs_dsp_mock_base_addr_for_mem(struct cs_dsp_test *priv, int mem_type); +unsigned int cs_dsp_mock_reg_addr_inc_per_unpacked_word(struct cs_dsp_test *priv); +unsigned int cs_dsp_mock_reg_block_length_bytes(struct cs_dsp_test *priv, int mem_type); +unsigned int cs_dsp_mock_reg_block_length_registers(struct cs_dsp_test *priv, int mem_type); +unsigned int cs_dsp_mock_reg_block_length_dsp_words(struct cs_dsp_test *priv, int mem_type); +bool cs_dsp_mock_has_zm(struct cs_dsp_test *priv); +int cs_dsp_mock_packed_to_unpacked_mem_type(int packed_mem_type); +unsigned int cs_dsp_mock_num_dsp_words_to_num_packed_regs(unsigned int num_dsp_words); +unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *priv, + unsigned int alg_id, + int mem_type); +unsigned int cs_dsp_mock_xm_header_get_fw_version_from_regmap(struct cs_dsp_test *priv); +unsigned int cs_dsp_mock_xm_header_get_fw_version(struct cs_dsp_mock_xm_header *header); +void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv); +int cs_dsp_mock_xm_header_write_to_regmap(struct cs_dsp_mock_xm_header *header); +struct cs_dsp_mock_xm_header *cs_dsp_create_mock_xm_header(struct cs_dsp_test *priv, + const struct cs_dsp_mock_alg_def *algs, + size_t num_algs); + +int cs_dsp_mock_regmap_init(struct cs_dsp_test *priv); +void cs_dsp_mock_regmap_drop_range(struct cs_dsp_test *priv, + unsigned int first_reg, unsigned int last_reg); +void cs_dsp_mock_regmap_drop_regs(struct cs_dsp_test *priv, + unsigned int first_reg, size_t num_regs); +void cs_dsp_mock_regmap_drop_bytes(struct cs_dsp_test *priv, + unsigned int first_reg, size_t num_bytes); +void cs_dsp_mock_regmap_drop_system_regs(struct cs_dsp_test *priv); +bool cs_dsp_mock_regmap_is_dirty(struct cs_dsp_test *priv, bool drop_system_regs); + +struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv, + int format_version, + unsigned int fw_version); +void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder, + unsigned int alg_id, unsigned int alg_ver, + int type, unsigned int offset, + const void *payload_data, size_t payload_len_bytes); +void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder, + const char *info); +void cs_dsp_mock_bin_add_name(struct cs_dsp_mock_bin_builder *builder, + const char *name); +void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder, + unsigned int alg_id, unsigned int alg_ver, + int mem_region, unsigned int reg_addr_offset, + const void *payload_data, size_t payload_len_bytes); 
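A hedged sketch of how the bin-builder helpers declared above (and cs_dsp_mock_bin_get_firmware(), declared just below) might be combined in a KUnit test: build a mock bin file carrying one patch block and hand the resulting firmware to the code under test. The format version, firmware version, algorithm ID/version, and payload are arbitrary test values; WMFW_ADSP2_YM is taken to be one of the wmfw.h memory-region constants:

/* Sketch: construct a minimal mock bin firmware for a cs_dsp test. */
static void sketch_build_bin(struct cs_dsp_test *priv)
{
	static const u8 payload[] = { 0x01, 0x02, 0x03, 0x04 };
	struct cs_dsp_mock_bin_builder *builder;
	struct firmware *fw;

	/* format version 2, firmware version 1.0.0 (arbitrary) */
	builder = cs_dsp_mock_bin_init(priv, 2, 0x10000);
	cs_dsp_mock_bin_add_info(builder, "mock bin for test");
	cs_dsp_mock_bin_add_patch(builder, 0xfa001, 1, WMFW_ADSP2_YM, 0,
				  payload, sizeof(payload));
	fw = cs_dsp_mock_bin_get_firmware(builder);
	/* ... pass fw to the cs_dsp power-up path under test ... */
}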
+struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder); + +struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv, + int format_version); +void cs_dsp_mock_wmfw_add_raw_block(struct cs_dsp_mock_wmfw_builder *builder, + int mem_region, unsigned int mem_offset_dsp_words, + const void *payload_data, size_t payload_len_bytes); +void cs_dsp_mock_wmfw_add_info(struct cs_dsp_mock_wmfw_builder *builder, + const char *info); +void cs_dsp_mock_wmfw_add_data_block(struct cs_dsp_mock_wmfw_builder *builder, + int mem_region, unsigned int mem_offset_dsp_words, + const void *payload_data, size_t payload_len_bytes); +void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder, + unsigned int alg_id, + const char *name, + const char *description); +void cs_dsp_mock_wmfw_add_coeff_desc(struct cs_dsp_mock_wmfw_builder *builder, + const struct cs_dsp_mock_coeff_def *def); +void cs_dsp_mock_wmfw_end_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder); +struct firmware *cs_dsp_mock_wmfw_get_firmware(struct cs_dsp_mock_wmfw_builder *builder); +int cs_dsp_mock_wmfw_format_version(struct cs_dsp_mock_wmfw_builder *builder); diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h index 4621aec0328c..983e1591bbba 100644 --- a/include/linux/firmware/qcom/qcom_scm.h +++ b/include/linux/firmware/qcom/qcom_scm.h @@ -105,6 +105,14 @@ bool qcom_scm_ice_available(void); int qcom_scm_ice_invalidate_key(u32 index); int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, enum qcom_scm_ice_cipher cipher, u32 data_unit_size); +bool qcom_scm_has_wrapped_key_support(void); +int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size, + u8 *sw_secret, size_t sw_secret_size); +int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size); +int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size, + u8 *eph_key, size_t eph_key_size); +int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size, + u8 *lt_key, size_t lt_key_size); bool qcom_scm_hdcp_available(void); int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h index 3abe614ef5f0..4d3f8074c137 100644 --- a/include/linux/folio_queue.h +++ b/include/linux/folio_queue.h @@ -37,16 +37,20 @@ struct folio_queue { #if PAGEVEC_SIZE > BITS_PER_LONG #error marks is not big enough #endif + unsigned int rreq_id; + unsigned int debug_id; }; /** * folioq_init - Initialise a folio queue segment * @folioq: The segment to initialise + * @rreq_id: The request identifier to use in tracelines. * - * Initialise a folio queue segment. Note that the folio pointers are - * left uninitialised. + * Initialise a folio queue segment and set an identifier to be used in traces. + * + * Note that the folio pointers are left uninitialised. 
*/
-static inline void folioq_init(struct folio_queue *folioq)
+static inline void folioq_init(struct folio_queue *folioq, unsigned int rreq_id)
 {
 	folio_batch_init(&folioq->vec);
 	folioq->next = NULL;
@@ -54,6 +58,8 @@ static inline void folioq_init(struct folio_queue *folioq)
 	folioq->marks = 0;
 	folioq->marks2 = 0;
 	folioq->marks3 = 0;
+	folioq->rreq_id = rreq_id;
+	folioq->debug_id = 0;
 }
 
 /**
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
index f39869588117..702099f08929 100644
--- a/include/linux/fprobe.h
+++ b/include/linux/fprobe.h
@@ -5,47 +5,68 @@
 
 #include <linux/compiler.h>
 #include <linux/ftrace.h>
-#include <linux/rethook.h>
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
 
 struct fprobe;
-
 typedef int (*fprobe_entry_cb)(struct fprobe *fp, unsigned long entry_ip,
-			       unsigned long ret_ip, struct pt_regs *regs,
+			       unsigned long ret_ip, struct ftrace_regs *regs,
			       void *entry_data);
 
 typedef void (*fprobe_exit_cb)(struct fprobe *fp, unsigned long entry_ip,
-			       unsigned long ret_ip, struct pt_regs *regs,
+			       unsigned long ret_ip, struct ftrace_regs *regs,
			       void *entry_data);
 
 /**
+ * struct fprobe_hlist_node - address based hash list node for fprobe.
+ *
+ * @hlist:	The hlist node for address search hash table.
+ * @addr:	One of the probed addresses of @fp.
+ * @fp:		The fprobe which owns this.
+ */
+struct fprobe_hlist_node {
+	struct hlist_node	hlist;
+	unsigned long		addr;
+	struct fprobe		*fp;
+};
+
+/**
+ * struct fprobe_hlist - hash list nodes for fprobe.
+ *
+ * @hlist:	The hlist node for existence checking hash table.
+ * @rcu:	rcu_head for RCU deferred release.
+ * @fp:		The fprobe which owns this fprobe_hlist.
+ * @size:	The size of @array.
+ * @array:	The fprobe_hlist_node for each address to probe.
+ */
+struct fprobe_hlist {
+	struct hlist_node		hlist;
+	struct rcu_head			rcu;
+	struct fprobe			*fp;
+	int				size;
+	struct fprobe_hlist_node	array[] __counted_by(size);
+};
+
+/**
 * struct fprobe - ftrace based probe.
- * @ops: The ftrace_ops.
+ *
 * @nmissed: The counter for missing events.
 * @flags: The status flag.
- * @rethook: The rethook data structure. (internal data)
 * @entry_data_size: The private data storage size.
- * @nr_maxactive: The max number of active functions.
 * @entry_handler: The callback function for function entry.
 * @exit_handler: The callback function for function exit.
+ * @hlist_array: The fprobe_hlist for fprobe search from IP hash table.
 */
 struct fprobe {
-#ifdef CONFIG_FUNCTION_TRACER
-	/*
-	 * If CONFIG_FUNCTION_TRACER is not set, CONFIG_FPROBE is disabled too.
-	 * But user of fprobe may keep embedding the struct fprobe on their own
-	 * code. To avoid build error, this will keep the fprobe data structure
-	 * defined here, but remove ftrace_ops data structure.
-	 */
-	struct ftrace_ops	ops;
-#endif
	unsigned long		nmissed;
	unsigned int		flags;
-	struct rethook		*rethook;
	size_t			entry_data_size;
-	int			nr_maxactive;
 
	fprobe_entry_cb entry_handler;
	fprobe_exit_cb  exit_handler;
+
+	struct fprobe_hlist	*hlist_array;
 };
 
 /* This fprobe is soft-disabled.
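A hedged sketch of registering an fprobe against the reworked structure above: the entry/exit callbacks now receive a struct ftrace_regs, and the embedded ftrace_ops, rethook, and nr_maxactive members are gone. register_fprobe() is taken to keep its existing (fp, filter, notfilter) form; the traced symbol is arbitrary:

#include <linux/fprobe.h>

static int sketch_entry(struct fprobe *fp, unsigned long entry_ip,
			unsigned long ret_ip, struct ftrace_regs *fregs,
			void *entry_data)
{
	/* runs on function entry */
	return 0;
}

static void sketch_exit(struct fprobe *fp, unsigned long entry_ip,
			unsigned long ret_ip, struct ftrace_regs *fregs,
			void *entry_data)
{
	/* runs on function return */
}

static struct fprobe sketch_fp = {
	.entry_handler	= sketch_entry,
	.exit_handler	= sketch_exit,
};

static int __init sketch_init(void)
{
	/* probe every match of the filter pattern, no exclusions */
	return register_fprobe(&sketch_fp, "vfs_read", NULL);
}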
*/ @@ -121,4 +142,9 @@ static inline void enable_fprobe(struct fprobe *fp) fp->flags &= ~FPROBE_FL_DISABLED; } +/* The entry data size is 4 bits (=16) * sizeof(long) in maximum */ +#define FPROBE_DATA_SIZE_BITS 4 +#define MAX_FPROBE_DATA_SIZE_WORD ((1L << FPROBE_DATA_SIZE_BITS) - 1) +#define MAX_FPROBE_DATA_SIZE (MAX_FPROBE_DATA_SIZE_WORD * sizeof(long)) + #endif diff --git a/include/linux/fs.h b/include/linux/fs.h index 7e29433c5ecc..be3ad155ec9f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -173,13 +173,20 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define FMODE_NOREUSE ((__force fmode_t)(1 << 23)) -/* FMODE_* bit 24 */ - /* File is embedded in backing_file object */ -#define FMODE_BACKING ((__force fmode_t)(1 << 25)) +#define FMODE_BACKING ((__force fmode_t)(1 << 24)) + +/* + * Together with FMODE_NONOTIFY_PERM defines which fsnotify events shouldn't be + * generated (see below) + */ +#define FMODE_NONOTIFY ((__force fmode_t)(1 << 25)) -/* File was opened by fanotify and shouldn't generate fanotify events */ -#define FMODE_NONOTIFY ((__force fmode_t)(1 << 26)) +/* + * Together with FMODE_NONOTIFY defines which fsnotify events shouldn't be + * generated (see below) + */ +#define FMODE_NONOTIFY_PERM ((__force fmode_t)(1 << 26)) /* File is capable of returning -EAGAIN if I/O will block */ #define FMODE_NOWAIT ((__force fmode_t)(1 << 27)) @@ -191,6 +198,32 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define FMODE_NOACCOUNT ((__force fmode_t)(1 << 29)) /* + * The two FMODE_NONOTIFY* define which fsnotify events should not be generated + * for a file. These are the possible values of (f->f_mode & + * FMODE_FSNOTIFY_MASK) and their meaning: + * + * FMODE_NONOTIFY - suppress all (incl. non-permission) events. + * FMODE_NONOTIFY_PERM - suppress permission (incl. pre-content) events. + * FMODE_NONOTIFY | FMODE_NONOTIFY_PERM - suppress only pre-content events. + */ +#define FMODE_FSNOTIFY_MASK \ + (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM) + +#define FMODE_FSNOTIFY_NONE(mode) \ + ((mode & FMODE_FSNOTIFY_MASK) == FMODE_NONOTIFY) +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS +#define FMODE_FSNOTIFY_PERM(mode) \ + ((mode & FMODE_FSNOTIFY_MASK) == 0 || \ + (mode & FMODE_FSNOTIFY_MASK) == (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM)) +#define FMODE_FSNOTIFY_HSM(mode) \ + ((mode & FMODE_FSNOTIFY_MASK) == 0) +#else +#define FMODE_FSNOTIFY_PERM(mode) 0 +#define FMODE_FSNOTIFY_HSM(mode) 0 +#endif + + +/* * Attribute flags. These should be or-ed together to figure out what * has been changed! */ @@ -322,6 +355,7 @@ struct readahead_control; #define IOCB_NOWAIT (__force int) RWF_NOWAIT #define IOCB_APPEND (__force int) RWF_APPEND #define IOCB_ATOMIC (__force int) RWF_ATOMIC +#define IOCB_DONTCACHE (__force int) RWF_DONTCACHE /* non-RWF related bits - start at 16 */ #define IOCB_EVENTFD (1 << 16) @@ -348,6 +382,7 @@ struct readahead_control; #define IOCB_DIO_CALLER_COMP (1 << 22) /* kiocb is a read or write operation submitted by fs/aio.c. 
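The new RWF_DONTCACHE/IOCB_DONTCACHE path in the fs.h hunk above is driven from userspace through the per-I/O flags of preadv2()/pwritev2(). A sketch of issuing an uncached buffered write, falling back when the filesystem lacks FOP_DONTCACHE; the local RWF_DONTCACHE fallback definition is an assumption for older uapi headers and should be checked against linux/fs.h:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

#ifndef RWF_DONTCACHE
#define RWF_DONTCACHE 0x00000080 /* assumed uapi value; verify locally */
#endif

int main(void)
{
	char buf[] = "hello, dontcache\n";
	struct iovec iov = { .iov_base = buf, .iov_len = strlen(buf) };
	int fd = open("testfile", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	ssize_t ret;

	if (fd < 0)
		return 1;
	ret = pwritev2(fd, &iov, 1, 0, RWF_DONTCACHE);
	if (ret < 0 && errno == EOPNOTSUPP)	/* fs lacks FOP_DONTCACHE */
		ret = pwritev2(fd, &iov, 1, 0, 0);
	printf("wrote %zd bytes\n", ret);
	close(fd);
	return ret < 0;
}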
*/ #define IOCB_AIO_RW (1 << 23) +#define IOCB_HAS_METADATA (1 << 24) /* for use in trace events */ #define TRACE_IOCB_STRINGS \ @@ -356,7 +391,8 @@ struct readahead_control; { IOCB_SYNC, "SYNC" }, \ { IOCB_NOWAIT, "NOWAIT" }, \ { IOCB_APPEND, "APPEND" }, \ - { IOCB_ATOMIC, "ATOMIC"}, \ + { IOCB_ATOMIC, "ATOMIC" }, \ + { IOCB_DONTCACHE, "DONTCACHE" }, \ { IOCB_EVENTFD, "EVENTFD"}, \ { IOCB_DIRECT, "DIRECT" }, \ { IOCB_WRITE, "WRITE" }, \ @@ -626,6 +662,7 @@ is_uncached_acl(struct posix_acl *acl) #define IOP_XATTR 0x0008 #define IOP_DEFAULT_READLINK 0x0010 #define IOP_MGTIME 0x0020 +#define IOP_CACHED_LINK 0x0040 /* * Keep mostly read-only and often accessed (especially for @@ -723,7 +760,10 @@ struct inode { }; struct file_lock_context *i_flctx; struct address_space i_data; - struct list_head i_devices; + union { + struct list_head i_devices; + int i_linklen; + }; union { struct pipe_inode_info *i_pipe; struct cdev *i_cdev; @@ -749,6 +789,13 @@ struct inode { void *i_private; /* fs or device private pointer */ } __randomize_layout; +static inline void inode_set_cached_link(struct inode *inode, char *link, int linklen) +{ + inode->i_link = link; + inode->i_linklen = linklen; + inode->i_opflags |= IOP_CACHED_LINK; +} + /* * Get bit address from inode->i_state to use with wait_var_event() * infrastructre. @@ -1232,6 +1279,7 @@ extern int send_sigurg(struct file *file); #define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */ #define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */ #define SB_I_NOIDMAP 0x00002000 /* No idmapped mounts on this superblock */ +#define SB_I_ALLOW_HSM 0x00004000 /* Allow HSM events on this superblock */ /* Possible states of 'frozen' field */ enum { @@ -2127,6 +2175,8 @@ struct file_operations { #define FOP_UNSIGNED_OFFSET ((__force fop_flags_t)(1 << 5)) /* Supports asynchronous lock callbacks */ #define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6)) +/* File system supports uncached read/write buffered IO */ +#define FOP_DONTCACHE ((__force fop_flags_t)(1 << 7)) /* Wrap a directory iterator that needs exclusive inode access */ int wrap_directory_iterator(struct file *, struct dir_context *, @@ -2751,6 +2801,8 @@ static inline struct file *file_open_root_mnt(struct vfsmount *mnt, } struct file *dentry_open(const struct path *path, int flags, const struct cred *creds); +struct file *dentry_open_nonotify(const struct path *path, int flags, + const struct cred *cred); struct file *dentry_create(const struct path *path, int flags, umode_t mode, const struct cred *cred); struct path *backing_file_user_path(struct file *f); @@ -2874,6 +2926,8 @@ extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart, extern int __must_check file_check_and_advance_wb_err(struct file *file); extern int __must_check file_write_and_wait_range(struct file *file, loff_t start, loff_t end); +int filemap_fdatawrite_range_kick(struct address_space *mapping, loff_t start, + loff_t end); static inline int file_write_and_wait(struct file *file) { @@ -2906,6 +2960,11 @@ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count) (iocb->ki_flags & IOCB_SYNC) ? 
0 : 1); if (ret) return ret; + } else if (iocb->ki_flags & IOCB_DONTCACHE) { + struct address_space *mapping = iocb->ki_filp->f_mapping; + + filemap_fdatawrite_range_kick(mapping, iocb->ki_pos, + iocb->ki_pos + count); } return count; @@ -3059,6 +3118,28 @@ static inline void allow_write_access(struct file *file) if (file) atomic_inc(&file_inode(file)->i_writecount); } + +/* + * Do not prevent write to executable file when watched by pre-content events. + * + * Note that FMODE_FSNOTIFY_HSM mode is set depending on pre-content watches at + * the time of file open and remains constant for entire lifetime of the file, + * so if pre-content watches are added post execution or removed before the end + * of the execution, it will not cause i_writecount reference leak. + */ +static inline int exe_file_deny_write_access(struct file *exe_file) +{ + if (unlikely(FMODE_FSNOTIFY_HSM(exe_file->f_mode))) + return 0; + return deny_write_access(exe_file); +} +static inline void exe_file_allow_write_access(struct file *exe_file) +{ + if (unlikely(!exe_file || FMODE_FSNOTIFY_HSM(exe_file->f_mode))) + return; + allow_write_access(exe_file); +} + static inline bool inode_is_open_for_write(const struct inode *inode) { return atomic_read(&inode->i_writecount) > 0; @@ -3351,7 +3432,7 @@ extern const struct file_operations generic_ro_fops; #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m)) -extern int readlink_copy(char __user *, int, const char *); +extern int readlink_copy(char __user *, int, const char *, int); extern int page_readlink(struct dentry *, char __user *, int); extern const char *page_get_link(struct dentry *, struct inode *, struct delayed_call *); @@ -3468,7 +3549,6 @@ struct offset_ctx { void simple_offset_init(struct offset_ctx *octx); int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry); void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry); -int simple_offset_empty(struct dentry *dentry); int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry); int simple_offset_rename_exchange(struct inode *old_dir, @@ -3614,6 +3694,14 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags, if (!(ki->ki_filp->f_mode & FMODE_CAN_ATOMIC_WRITE)) return -EOPNOTSUPP; } + if (flags & RWF_DONTCACHE) { + /* file system must support it */ + if (!(ki->ki_filp->f_op->fop_flags & FOP_DONTCACHE)) + return -EOPNOTSUPP; + /* DAX mappings not supported */ + if (IS_DAX(ki->ki_filp->f_mapping->host)) + return -EOPNOTSUPP; + } kiocb_flags |= (__force int) (flags & RWF_SUPPORTED); if (flags & RWF_SYNC) kiocb_flags |= IOCB_DSYNC; @@ -3707,11 +3795,9 @@ struct ctl_table; int __init list_bdev_fs_names(char *buf, size_t size); #define __FMODE_EXEC ((__force int) FMODE_EXEC) -#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY) #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE]) -#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \ - (flag & __FMODE_NONOTIFY))) +#define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE)) static inline bool is_sxid(umode_t mode) { diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h index 3cef566088fc..53e566efd5fd 100644 --- a/include/linux/fs_parser.h +++ b/include/linux/fs_parser.h @@ -84,6 +84,8 @@ extern int fs_lookup_param(struct fs_context *fc, extern int lookup_constant(const struct constant_table tbl[], const char *name, int not_found); +extern const struct constant_table bool_names[]; + #ifdef 
CONFIG_VALIDATE_FS_PARSER extern bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size, int low, int high, int special); diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 278620e063ab..1a9ef8f6784d 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -108,38 +108,35 @@ static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask) fsnotify_parent(dentry, mask, dentry, FSNOTIFY_EVENT_DENTRY); } -static inline int fsnotify_file(struct file *file, __u32 mask) +static inline int fsnotify_path(const struct path *path, __u32 mask) { - const struct path *path; + return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); +} +static inline int fsnotify_file(struct file *file, __u32 mask) +{ /* * FMODE_NONOTIFY are fds generated by fanotify itself which should not * generate new events. We also don't want to generate events for * FMODE_PATH fds (involves open & close events) as they are just * handle creation / destruction events and not "real" file events. */ - if (file->f_mode & (FMODE_NONOTIFY | FMODE_PATH)) + if (FMODE_FSNOTIFY_NONE(file->f_mode)) return 0; - path = &file->f_path; - /* Permission events require group prio >= FSNOTIFY_PRIO_CONTENT */ - if (mask & ALL_FSNOTIFY_PERM_EVENTS && - !fsnotify_sb_has_priority_watchers(path->dentry->d_sb, - FSNOTIFY_PRIO_CONTENT)) - return 0; - - return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); + return fsnotify_path(&file->f_path, mask); } #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS + +void file_set_fsnotify_mode(struct file *file); + /* * fsnotify_file_area_perm - permission hook before access to file range */ static inline int fsnotify_file_area_perm(struct file *file, int perm_mask, const loff_t *ppos, size_t count) { - __u32 fsnotify_mask = FS_ACCESS_PERM; - /* * filesystem may be modified in the context of permission events * (e.g. by HSM filling a file on access), so sb freeze protection @@ -147,14 +144,49 @@ static inline int fsnotify_file_area_perm(struct file *file, int perm_mask, */ lockdep_assert_once(file_write_not_started(file)); + if (!(perm_mask & (MAY_READ | MAY_WRITE | MAY_ACCESS))) + return 0; + + if (likely(!FMODE_FSNOTIFY_PERM(file->f_mode))) + return 0; + + /* + * read()/write() and other types of access generate pre-content events. + */ + if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) { + int ret = fsnotify_pre_content(&file->f_path, ppos, count); + + if (ret) + return ret; + } + if (!(perm_mask & MAY_READ)) return 0; - return fsnotify_file(file, fsnotify_mask); + /* + * read() also generates the legacy FS_ACCESS_PERM event, so content + * scanners can inspect the content filled by pre-content event. 
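The early returns in the hooks above key off the three meaningful states of FMODE_FSNOTIFY_MASK described in the fs.h hunk earlier. A standalone demo of the decoding, using the bit positions given there (bits 25 and 26); the state names are paraphrased from that comment:

#include <stdio.h>

#define FMODE_NONOTIFY      (1u << 25)
#define FMODE_NONOTIFY_PERM (1u << 26)
#define FMODE_FSNOTIFY_MASK (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM)

static const char *fsnotify_state(unsigned int mode)
{
	switch (mode & FMODE_FSNOTIFY_MASK) {
	case 0:
		return "all events generated (incl. pre-content)";
	case FMODE_NONOTIFY:
		return "suppress all events";
	case FMODE_NONOTIFY_PERM:
		return "suppress permission (incl. pre-content) events";
	default: /* FMODE_NONOTIFY | FMODE_NONOTIFY_PERM */
		return "suppress only pre-content events";
	}
}

int main(void)
{
	unsigned int states[] = { 0, FMODE_NONOTIFY, FMODE_NONOTIFY_PERM,
				  FMODE_NONOTIFY | FMODE_NONOTIFY_PERM };

	for (int i = 0; i < 4; i++)
		printf("%#010x: %s\n", states[i], fsnotify_state(states[i]));
	return 0;
}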
+ */ + return fsnotify_path(&file->f_path, FS_ACCESS_PERM); +} + +/* + * fsnotify_truncate_perm - permission hook before file truncate + */ +static inline int fsnotify_truncate_perm(const struct path *path, loff_t length) +{ + struct inode *inode = d_inode(path->dentry); + + if (!(inode->i_sb->s_iflags & SB_I_ALLOW_HSM) || + !fsnotify_sb_has_priority_watchers(inode->i_sb, + FSNOTIFY_PRIO_PRE_CONTENT)) + return 0; + + return fsnotify_pre_content(path, &length, 0); } /* - * fsnotify_file_perm - permission hook before file access + * fsnotify_file_perm - permission hook before file access (unknown range) */ static inline int fsnotify_file_perm(struct file *file, int perm_mask) { @@ -168,22 +200,34 @@ static inline int fsnotify_open_perm(struct file *file) { int ret; + if (likely(!FMODE_FSNOTIFY_PERM(file->f_mode))) + return 0; + if (file->f_flags & __FMODE_EXEC) { - ret = fsnotify_file(file, FS_OPEN_EXEC_PERM); + ret = fsnotify_path(&file->f_path, FS_OPEN_EXEC_PERM); if (ret) return ret; } - return fsnotify_file(file, FS_OPEN_PERM); + return fsnotify_path(&file->f_path, FS_OPEN_PERM); } #else +static inline void file_set_fsnotify_mode(struct file *file) +{ +} + static inline int fsnotify_file_area_perm(struct file *file, int perm_mask, const loff_t *ppos, size_t count) { return 0; } +static inline int fsnotify_truncate_perm(const struct path *path, loff_t length) +{ + return 0; +} + static inline int fsnotify_file_perm(struct file *file, int perm_mask) { return 0; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 3ecf7768e577..0d24a21a8e60 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -55,6 +55,9 @@ #define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */ #define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */ #define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */ +/* #define FS_DIR_MODIFY 0x00080000 */ /* Deprecated (reserved) */ + +#define FS_PRE_ACCESS 0x00100000 /* Pre-content access hook */ /* * Set on inode mark that cares about things that happen to its children. 
@@ -77,8 +80,14 @@ */ #define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | FS_RENAME) -#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \ - FS_OPEN_EXEC_PERM) +/* Content events can be used to inspect file content */ +#define FSNOTIFY_CONTENT_PERM_EVENTS (FS_OPEN_PERM | FS_OPEN_EXEC_PERM | \ + FS_ACCESS_PERM) +/* Pre-content events can be used to fill file content */ +#define FSNOTIFY_PRE_CONTENT_EVENTS (FS_PRE_ACCESS) + +#define ALL_FSNOTIFY_PERM_EVENTS (FSNOTIFY_CONTENT_PERM_EVENTS | \ + FSNOTIFY_PRE_CONTENT_EVENTS) /* * This is a list of all events that may get sent to a parent that is watching @@ -285,6 +294,7 @@ static inline void fsnotify_group_assert_locked(struct fsnotify_group *group) /* When calling fsnotify tell it if the data is a path or inode */ enum fsnotify_data_type { FSNOTIFY_EVENT_NONE, + FSNOTIFY_EVENT_FILE_RANGE, FSNOTIFY_EVENT_PATH, FSNOTIFY_EVENT_INODE, FSNOTIFY_EVENT_DENTRY, @@ -297,6 +307,17 @@ struct fs_error_report { struct super_block *sb; }; +struct file_range { + const struct path *path; + loff_t pos; + size_t count; +}; + +static inline const struct path *file_range_path(const struct file_range *range) +{ + return range->path; +} + static inline struct inode *fsnotify_data_inode(const void *data, int data_type) { switch (data_type) { @@ -306,6 +327,8 @@ static inline struct inode *fsnotify_data_inode(const void *data, int data_type) return d_inode(data); case FSNOTIFY_EVENT_PATH: return d_inode(((const struct path *)data)->dentry); + case FSNOTIFY_EVENT_FILE_RANGE: + return d_inode(file_range_path(data)->dentry); case FSNOTIFY_EVENT_ERROR: return ((struct fs_error_report *)data)->inode; default: @@ -321,6 +344,8 @@ static inline struct dentry *fsnotify_data_dentry(const void *data, int data_typ return (struct dentry *)data; case FSNOTIFY_EVENT_PATH: return ((const struct path *)data)->dentry; + case FSNOTIFY_EVENT_FILE_RANGE: + return file_range_path(data)->dentry; default: return NULL; } @@ -332,6 +357,8 @@ static inline const struct path *fsnotify_data_path(const void *data, switch (data_type) { case FSNOTIFY_EVENT_PATH: return data; + case FSNOTIFY_EVENT_FILE_RANGE: + return file_range_path(data); default: return NULL; } @@ -347,6 +374,8 @@ static inline struct super_block *fsnotify_data_sb(const void *data, return ((struct dentry *)data)->d_sb; case FSNOTIFY_EVENT_PATH: return ((const struct path *)data)->dentry->d_sb; + case FSNOTIFY_EVENT_FILE_RANGE: + return file_range_path(data)->dentry->d_sb; case FSNOTIFY_EVENT_ERROR: return ((struct fs_error_report *) data)->sb; default: @@ -366,6 +395,18 @@ static inline struct fs_error_report *fsnotify_data_error_report( } } +static inline const struct file_range *fsnotify_data_file_range( + const void *data, + int data_type) +{ + switch (data_type) { + case FSNOTIFY_EVENT_FILE_RANGE: + return (struct file_range *)data; + default: + return NULL; + } +} + /* * Index to merged marks iterator array that correlates to a type of watch. 
 * The type of watched object can be deduced from the iterator type, but not @@ -854,9 +895,17 @@ static inline void fsnotify_init_event(struct fsnotify_event *event) { INIT_LIST_HEAD(&event->list); } +int fsnotify_pre_content(const struct path *path, const loff_t *ppos, + size_t count); #else +static inline int fsnotify_pre_content(const struct path *path, + const loff_t *ppos, size_t count) +{ + return 0; +} + static inline int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *name, struct inode *inode, u32 cookie) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index aa9ddd1e4bb6..fbabc3d848b3 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -43,9 +43,8 @@ struct dyn_ftrace; char *arch_ftrace_match_adjust(char *str, const char *search); -#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL -struct fgraph_ret_regs; -unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs); +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS +unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs); #else unsigned long ftrace_return_to_handler(unsigned long frame_pointer); #endif @@ -134,6 +133,13 @@ extern int ftrace_enabled; * Also, architecture dependent fields can be used for internal process. * (e.g. orig_ax on x86_64) * + * Basically, ftrace_regs stores the registers related to the context. + * On function entry, registers for function parameters and hooking the + * function call are stored, and on function exit, registers for function + * return value and frame pointers are stored. + * + * Also, which registers are restored from the ftrace_regs depends on + * the context. * On the function entry, those registers will be restored except for * the stack pointer, so that user can change the function parameters * and instruction pointer (e.g. live patching.) @@ -170,6 +176,12 @@ static inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs) #define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0) #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */ +#ifdef CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS + +static_assert(sizeof(struct pt_regs) == ftrace_regs_size()); + +#endif /* CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */ + static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs) { if (!fregs) @@ -178,6 +190,54 @@ static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs return arch_ftrace_get_regs(fregs); } +#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) || \ + defined(CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS) + +static __always_inline struct pt_regs * +ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs) +{ + /* + * If CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS=y, the ftrace_regs memory + * layout includes pt_regs, so always return that address. + * Since arch_ftrace_get_regs() will check some members and may return + * NULL, we cannot use it here. + */ + return &arch_ftrace_regs(fregs)->regs; +} + +#endif /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS || CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */ + +#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS + +/* + * Please define an arch-dependent pt_regs which is compatible with + * perf_arch_fetch_caller_regs() but based on ftrace_regs. + * This requires: + * - user_mode(_regs) returns false (always kernel mode). + * - able to use the _regs for stack trace.
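 *
 * An illustrative definition (a hypothetical, arm64-flavored sketch; each
 * arch fills whichever registers its perf stack walker needs):
 *
 *	#define arch_ftrace_fill_perf_regs(fregs, _regs) do {		\
 *		(_regs)->pc = arch_ftrace_regs(fregs)->pc;		\
 *		(_regs)->sp = arch_ftrace_regs(fregs)->sp;		\
 *		(_regs)->regs[29] = arch_ftrace_regs(fregs)->fp;	\
 *	} while (0)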
 */ +#ifndef arch_ftrace_fill_perf_regs +/* Same as perf_arch_fetch_caller_regs(): do nothing by default */ +#define arch_ftrace_fill_perf_regs(fregs, _regs) do {} while (0) +#endif + +static __always_inline struct pt_regs * +ftrace_fill_perf_regs(struct ftrace_regs *fregs, struct pt_regs *regs) +{ + arch_ftrace_fill_perf_regs(fregs, regs); + return regs; +} + +#else /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */ + +static __always_inline struct pt_regs * +ftrace_fill_perf_regs(struct ftrace_regs *fregs, struct pt_regs *regs) +{ + return &arch_ftrace_regs(fregs)->regs; +} + +#endif + /* * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs. * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs. @@ -190,6 +250,23 @@ static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs) return ftrace_get_regs(fregs) != NULL; } +#ifdef CONFIG_HAVE_REGS_AND_STACK_ACCESS_API +static __always_inline unsigned long +ftrace_regs_get_kernel_stack_nth(struct ftrace_regs *fregs, unsigned int nth) +{ + unsigned long *stackp; + + stackp = (unsigned long *)ftrace_regs_get_stack_pointer(fregs); + if (((unsigned long)(stackp + nth) & ~(THREAD_SIZE - 1)) == + ((unsigned long)stackp & ~(THREAD_SIZE - 1))) + return *(stackp + nth); + + return 0; +} +#else /* !CONFIG_HAVE_REGS_AND_STACK_ACCESS_API */ +#define ftrace_regs_get_kernel_stack_nth(fregs, nth) (0L) +#endif /* CONFIG_HAVE_REGS_AND_STACK_ACCESS_API */ + typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs); @@ -545,6 +622,19 @@ enum { FTRACE_MAY_SLEEP = (1 << 5), }; +/* Arches can override ftrace_get_symaddr() to convert fentry_ip to symaddr. */ +#ifndef ftrace_get_symaddr +/** + * ftrace_get_symaddr - return the symbol address from fentry_ip + * @fentry_ip: the address of ftrace location + * + * Get the symbol address from @fentry_ip (fast path). If there is no fast + * search path, this returns 0. + * Users may need to use the kallsyms API to find the symbol address.
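 *
 * An illustrative use (hypothetical probe handler; on a miss the caller
 * would fall back to a kallsyms lookup):
 *
 *	unsigned long sym = ftrace_get_symaddr(fentry_ip);
 *	if (!sym)
 *		return -ENOENT;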
+ */ +#define ftrace_get_symaddr(fentry_ip) (0) +#endif + #ifdef CONFIG_DYNAMIC_FTRACE void ftrace_arch_code_modify_prepare(void); @@ -1061,20 +1151,21 @@ struct ftrace_graph_ret { int depth; /* Number of functions that overran the depth limit for current task */ unsigned int overrun; - unsigned long long calltime; - unsigned long long rettime; } __packed; struct fgraph_ops; /* Type of the callback handlers for tracing function graph*/ typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *, - struct fgraph_ops *); /* return */ + struct fgraph_ops *, + struct ftrace_regs *); /* return */ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *, - struct fgraph_ops *); /* entry */ + struct fgraph_ops *, + struct ftrace_regs *); /* entry */ extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, - struct fgraph_ops *gops); + struct fgraph_ops *gops, + struct ftrace_regs *fregs); bool ftrace_pids_enabled(struct ftrace_ops *ops); #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -1114,8 +1205,15 @@ struct ftrace_ret_stack { extern void return_to_handler(void); extern int -function_graph_enter(unsigned long ret, unsigned long func, - unsigned long frame_pointer, unsigned long *retp); +function_graph_enter_regs(unsigned long ret, unsigned long func, + unsigned long frame_pointer, unsigned long *retp, + struct ftrace_regs *fregs); + +static inline int function_graph_enter(unsigned long ret, unsigned long func, + unsigned long fp, unsigned long *retp) +{ + return function_graph_enter_regs(ret, func, fp, retp, NULL); +} struct ftrace_ret_stack * ftrace_graph_get_ret_stack(struct task_struct *task, int skip); diff --git a/include/linux/ftrace_regs.h b/include/linux/ftrace_regs.h index be1ed0c891d0..bbc1873ca6b8 100644 --- a/include/linux/ftrace_regs.h +++ b/include/linux/ftrace_regs.h @@ -30,6 +30,8 @@ struct ftrace_regs; override_function_with_return(&arch_ftrace_regs(fregs)->regs) #define ftrace_regs_query_register_offset(name) \ regs_query_register_offset(name) +#define ftrace_regs_get_frame_pointer(fregs) \ + frame_pointer(&arch_ftrace_regs(fregs)->regs) #endif /* HAVE_ARCH_FTRACE_REGS */ diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 0d79070c5a70..0731994b9d7c 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -112,6 +112,7 @@ struct fwnode_reference_args { * @device_is_available: Return true if the device is available. * @device_get_match_data: Return the device driver match data. * @property_present: Return true if a property is present. + * @property_read_bool: Return a boolean property value. * @property_read_int_array: Read an array of integer properties. Return zero on * success, a negative error code otherwise. * @property_read_string_array: Read an array of string properties. 
Return zero @@ -141,6 +142,8 @@ struct fwnode_operations { (*device_get_dma_attr)(const struct fwnode_handle *fwnode); bool (*property_present)(const struct fwnode_handle *fwnode, const char *propname); + bool (*property_read_bool)(const struct fwnode_handle *fwnode, + const char *propname); int (*property_read_int_array)(const struct fwnode_handle *fwnode, const char *propname, unsigned int elem_size, void *val, diff --git a/include/linux/gfp.h b/include/linux/gfp.h index b0fe9f62d15b..6bb1a5a7a4ae 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -212,35 +212,31 @@ struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, nodemask_t *nodemask, int nr_pages, - struct list_head *page_list, struct page **page_array); #define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__)) -unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp, +unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp, unsigned long nr_pages, struct page **page_array); -#define alloc_pages_bulk_array_mempolicy(...) \ - alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__)) +#define alloc_pages_bulk_mempolicy(...) \ + alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__)) /* Bulk allocate order-0 pages */ -#define alloc_pages_bulk_list(_gfp, _nr_pages, _list) \ - __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _list, NULL) - -#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array) \ - __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, NULL, _page_array) +#define alloc_pages_bulk(_gfp, _nr_pages, _page_array) \ + __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array) static inline unsigned long -alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages, +alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array) { if (nid == NUMA_NO_NODE) nid = numa_mem_id(); - return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, NULL, page_array); + return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array); } -#define alloc_pages_bulk_array_node(...) \ - alloc_hooks(alloc_pages_bulk_array_node_noprof(__VA_ARGS__)) +#define alloc_pages_bulk_node(...) \ + alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__)) static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask) { @@ -300,8 +296,6 @@ static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask, #ifdef CONFIG_NUMA struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order); -struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order, - struct mempolicy *mpol, pgoff_t ilx, int nid); struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order); struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *mpol, pgoff_t ilx, int nid); @@ -312,11 +306,6 @@ static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order { return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order); } -static inline struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order, - struct mempolicy *mpol, pgoff_t ilx, int nid) -{ - return alloc_pages_noprof(gfp, order); -} static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order) { return __folio_alloc_node_noprof(gfp, order, numa_node_id()); @@ -331,7 +320,6 @@ static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int orde #endif #define alloc_pages(...) 
alloc_hooks(alloc_pages_noprof(__VA_ARGS__)) -#define alloc_pages_mpol(...) alloc_hooks(alloc_pages_mpol_noprof(__VA_ARGS__)) #define folio_alloc(...) alloc_hooks(folio_alloc_noprof(__VA_ARGS__)) #define folio_alloc_mpol(...) alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__)) #define vma_alloc_folio(...) alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__)) diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 455f855bc084..96bda41d9148 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -445,7 +445,6 @@ ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size); ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame, void *buffer, size_t size); -int hdmi_infoframe_check(union hdmi_infoframe *frame); int hdmi_infoframe_unpack(union hdmi_infoframe *frame, const void *buffer, size_t size); void hdmi_infoframe_log(const char *level, struct device *dev, diff --git a/include/linux/hid-over-i2c.h b/include/linux/hid-over-i2c.h new file mode 100644 index 000000000000..3b1a0208a6b8 --- /dev/null +++ b/include/linux/hid-over-i2c.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright 2024 Intel Corporation */ + +#include <linux/bits.h> + +#ifndef _HID_OVER_I2C_H_ +#define _HID_OVER_I2C_H_ + +#define HIDI2C_REG_LEN sizeof(__le16) + +/* Input report type definition in HIDI2C protocol */ +enum hidi2c_report_type { + HIDI2C_RESERVED = 0, + HIDI2C_INPUT, + HIDI2C_OUTPUT, + HIDI2C_FEATURE, +}; + +/* Power state type definition in HIDI2C protocol */ +enum hidi2c_power_state { + HIDI2C_ON, + HIDI2C_SLEEP, +}; + +/* Opcode type definition in HIDI2C protocol */ +enum hidi2c_opcode { + HIDI2C_RESET = 1, + HIDI2C_GET_REPORT, + HIDI2C_SET_REPORT, + HIDI2C_GET_IDLE, + HIDI2C_SET_IDLE, + HIDI2C_GET_PROTOCOL, + HIDI2C_SET_PROTOCOL, + HIDI2C_SET_POWER, +}; + +/** + * struct hidi2c_report_packet - Report packet definition in HIDI2C protocol + * @len: data field length + * @data: HIDI2C report packet data + */ +struct hidi2c_report_packet { + __le16 len; + u8 data[]; } __packed; + +#define HIDI2C_LENGTH_LEN sizeof(__le16) + +#define HIDI2C_PACKET_LEN(data_len) ((data_len) + HIDI2C_LENGTH_LEN) +#define HIDI2C_DATA_LEN(pkt_len) ((pkt_len) - HIDI2C_LENGTH_LEN) + +#define HIDI2C_CMD_MAX_RI 0x0F + +/** + * HIDI2C command data packet - Command packet definition in HIDI2C protocol + * @report_id: [0:3] report id (<15) for features or output reports + * @report_type: [4:5] indicate report type, reference to hidi2c_report_type + * @reserved0: [6:7] reserved bits + * @opcode: [8:11] command operation code, reference to hidi2c_opcode + * @reserved1: [12:15] reserved bits + * @report_id_optional: [23:16] appended 3rd byte. + * If the report_id in the low byte is set to the + * sentinel value (HIDI2C_CMD_MAX_RI), then this + * optional third byte represents the report id (>=15). + * Otherwise, this third byte is not present. + */ + +#define HIDI2C_CMD_LEN sizeof(__le16) +#define HIDI2C_CMD_LEN_OPT (sizeof(__le16) + 1) +#define HIDI2C_CMD_REPORT_ID GENMASK(3, 0) +#define HIDI2C_CMD_REPORT_TYPE GENMASK(5, 4) +#define HIDI2C_CMD_OPCODE GENMASK(11, 8) +#define HIDI2C_CMD_3RD_BYTE GENMASK(23, 16) + +#define HIDI2C_HID_DESC_BCDVERSION 0x100 + +/** + * struct hidi2c_dev_descriptor - HIDI2C device descriptor definition + * @dev_desc_len: The length of the complete device descriptor, fixed to 0x1E (30). + * @bcd_ver: The version number of the HIDI2C protocol supported. + * In binary coded decimal (BCD) format.
 * @report_desc_len: The length of the report descriptor + * @report_desc_reg: The register address to retrieve report descriptor + * @input_reg: the register address to retrieve input report + * @max_input_len: The length of the largest possible HID input (or feature) report + * @output_reg: the register address to send output report + * @max_output_len: The length of the largest output (or feature) report + * @cmd_reg: the register address to send command + * @data_reg: the register address to send command data + * @vendor_id: Device manufacturer's vendor ID + * @product_id: Device unique model/product ID + * @version_id: Device’s unique version + * @reserved0: Reserved and should be 0 + * @reserved1: Reserved and should be 0 + */ +struct hidi2c_dev_descriptor { + __le16 dev_desc_len; + __le16 bcd_ver; + __le16 report_desc_len; + __le16 report_desc_reg; + __le16 input_reg; + __le16 max_input_len; + __le16 output_reg; + __le16 max_output_len; + __le16 cmd_reg; + __le16 data_reg; + __le16 vendor_id; + __le16 product_id; + __le16 version_id; + __le16 reserved0; + __le16 reserved1; +} __packed; + +#define HIDI2C_DEV_DESC_LEN sizeof(struct hidi2c_dev_descriptor) + +#endif /* _HID_OVER_I2C_H_ */ diff --git a/include/linux/hid-over-spi.h b/include/linux/hid-over-spi.h new file mode 100644 index 000000000000..da5a14b5e89b --- /dev/null +++ b/include/linux/hid-over-spi.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright 2024 Intel Corporation */ + +#ifndef _HID_OVER_SPI_H_ +#define _HID_OVER_SPI_H_ + +#include <linux/bits.h> +#include <linux/types.h> + +/* Input report type definition in HIDSPI protocol */ +enum input_report_type { + INVALID_INPUT_REPORT_TYPE_0 = 0, + DATA = 1, + INVALID_TYPE_2 = 2, + RESET_RESPONSE = 3, + COMMAND_RESPONSE = 4, + GET_FEATURE_RESPONSE = 5, + INVALID_TYPE_6 = 6, + DEVICE_DESCRIPTOR_RESPONSE = 7, + REPORT_DESCRIPTOR_RESPONSE = 8, + SET_FEATURE_RESPONSE = 9, + OUTPUT_REPORT_RESPONSE = 10, + GET_INPUT_REPORT_RESPONSE = 11, + INVALID_INPUT_REPORT_TYPE = 0xF, +}; + +/* Output report type definition in HIDSPI protocol */ +enum output_report_type { + INVALID_OUTPUT_REPORT_TYPE_0 = 0, + DEVICE_DESCRIPTOR = 1, + REPORT_DESCRIPTOR = 2, + SET_FEATURE = 3, + GET_FEATURE = 4, + OUTPUT_REPORT = 5, + GET_INPUT_REPORT = 6, + COMMAND_CONTENT = 7, +}; + +/* Set power command ID for output report */ +#define HIDSPI_SET_POWER_CMD_ID 1 + +/* Power state definition in HIDSPI protocol */ +enum hidspi_power_state { + HIDSPI_ON = 1, + HIDSPI_SLEEP = 2, + HIDSPI_OFF = 3, +}; + +/** + * Input report header definition in HIDSPI protocol + * The report header size is 32 bits; it includes: + * protocol_ver: [0:3] Current supported HIDSPI protocol version, must be 0x3 + * reserved0: [4:7] Reserved bits + * input_report_len: [8:21] Input report length in number of bytes divided by 4 + * last_frag_flag: [22] Indicates whether this packet is the last fragment.
+ * 1 - indicates last fragment + * 0 - indicates additional fragments + * reserved1: [23] Reserved bits + * @sync_const: [24:31] Used to validate input report header, must be 0x5A + */ +#define HIDSPI_INPUT_HEADER_SIZE sizeof(u32) +#define HIDSPI_INPUT_HEADER_VER GENMASK(3, 0) +#define HIDSPI_INPUT_HEADER_REPORT_LEN GENMASK(21, 8) +#define HIDSPI_INPUT_HEADER_LAST_FLAG BIT(22) +#define HIDSPI_INPUT_HEADER_SYNC GENMASK(31, 24) + +/** + * struct input_report_body_header - Input report body header definition in HIDSPI protocol + * @input_report_type: indicate input report type, reference to enum input_report_type + * @content_len: this input report body packet length + * @content_id: indicate this input report's report id + */ +struct input_report_body_header { + u8 input_report_type; + __le16 content_len; + u8 content_id; +} __packed; + +#define HIDSPI_INPUT_BODY_HEADER_SIZE sizeof(struct input_report_body_header) + +/** + * struct input_report_body - Input report body definition in HIDSPI protocol + * @body_hdr: input report body header + * @content: input report body content + */ +struct input_report_body { + struct input_report_body_header body_hdr; + u8 content[]; +} __packed; + +#define HIDSPI_INPUT_BODY_SIZE(content_len) ((content_len) + HIDSPI_INPUT_BODY_HEADER_SIZE) + +/** + * struct output_report_header - Output report header definition in HIDSPI protocol + * @report_type: output report type, reference to enum output_report_type + * @content_len: length of content + * @content_id: 0x00 - descriptors + * report id - Set/Feature feature or Input/Output Reports + * command opcode - for commands + */ +struct output_report_header { + u8 report_type; + __le16 content_len; + u8 content_id; +} __packed; + +#define HIDSPI_OUTPUT_REPORT_HEADER_SIZE sizeof(struct output_report_header) + +/** + * struct output_report - Output report definition in HIDSPI protocol + * @output_hdr: output report header + * @content: output report content + */ +struct output_report { + struct output_report_header output_hdr; + u8 content[]; +} __packed; + +#define HIDSPI_OUTPUT_REPORT_SIZE(content_len) ((content_len) + HIDSPI_OUTPUT_REPORT_HEADER_SIZE) + +/** + * struct hidspi_dev_descriptor - HIDSPI device descriptor definition + * @dev_desc_len: The length of the complete device descriptor, fixed to 0x18 (24). + * @bcd_ver: The version number of the HIDSPI protocol supported. + * In binary coded decimal (BCD) format. Must be fixed to 0x0300. + * @rep_desc_len: The length of the report descriptor + * @max_input_len: The length of the largest possible HID input (or feature) report + * @max_output_len: The length of the largest output (or feature) report + * @max_frag_len: The length of the largest fragment, where a fragment represents + * the body of an input report. 
 * @vendor_id: Device manufacturer's vendor ID + * @product_id: Device unique model/product ID + * @version_id: Device’s unique version + * @flags: Specify flags for the device’s operation + * @reserved: Reserved and should be 0 + */ +struct hidspi_dev_descriptor { + __le16 dev_desc_len; + __le16 bcd_ver; + __le16 rep_desc_len; + __le16 max_input_len; + __le16 max_output_len; + __le16 max_frag_len; + __le16 vendor_id; + __le16 product_id; + __le16 version_id; + __le16 flags; + __le32 reserved; +}; + +#define HIDSPI_DEVICE_DESCRIPTOR_SIZE sizeof(struct hidspi_dev_descriptor) +#define HIDSPI_INPUT_DEVICE_DESCRIPTOR_SIZE \ + (HIDSPI_INPUT_BODY_HEADER_SIZE + HIDSPI_DEVICE_DESCRIPTOR_SIZE) + +#endif /* _HID_OVER_SPI_H_ */ diff --git a/include/linux/hid.h b/include/linux/hid.h index d11e9c9a5f15..cdc0dc13c87f 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -218,6 +218,7 @@ struct hid_item { #define HID_GD_DOWN 0x00010091 #define HID_GD_RIGHT 0x00010092 #define HID_GD_LEFT 0x00010093 +#define HID_GD_DO_NOT_DISTURB 0x0001009b /* Microsoft Win8 Wireless Radio Controls CA usage codes */ #define HID_GD_RFKILL_BTN 0x000100c6 #define HID_GD_RFKILL_LED 0x000100c7 diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 6dbd0d49628f..99fcf65d575f 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -97,6 +97,8 @@ /* page number for queue file region */ #define QM_DOORBELL_PAGE_NR 1 +#define QM_DEV_ALG_MAX_LEN 256 + /* uacce mode of the driver */ #define UACCE_MODE_NOUACCE 0 /* don't use uacce */ #define UACCE_MODE_SVA 1 /* use uacce sva mode */ @@ -122,6 +124,7 @@ enum qm_hw_ver { QM_HW_V1 = 0x20, QM_HW_V2 = 0x21, QM_HW_V3 = 0x30, + QM_HW_V4 = 0x50, }; enum qm_fun_type { @@ -156,6 +159,7 @@ enum qm_cap_bits { QM_SUPPORT_MB_COMMAND, QM_SUPPORT_SVA_PREFETCH, QM_SUPPORT_RPM, + QM_SUPPORT_DAE, }; struct qm_dev_alg { @@ -266,6 +270,8 @@ struct hisi_qm_err_ini { void (*show_last_dfx_regs)(struct hisi_qm *qm); void (*err_info_init)(struct hisi_qm *qm); enum acc_err_result (*get_err_result)(struct hisi_qm *qm); + bool (*dev_is_abnormal)(struct hisi_qm *qm); + int (*set_priv_status)(struct hisi_qm *qm); }; struct hisi_qm_cap_info { @@ -392,6 +398,8 @@ struct hisi_qm { struct mutex mailbox_lock; + struct mutex ifc_lock; + const struct hisi_qm_hw_ops *ops; struct qm_debug debug; diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 7ef5f7ef31a9..f7bfdcf0dda3 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -386,6 +386,7 @@ extern void __init hrtimers_init(void); extern void sysrq_timer_list_show(void); int hrtimers_prepare_cpu(unsigned int cpu); +int hrtimers_cpu_starting(unsigned int cpu); #ifdef CONFIG_HOTPLUG_CPU int hrtimers_cpu_dying(unsigned int cpu); #else diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index b94c2e8ee918..93e509b6c00e 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -121,6 +121,8 @@ enum mthp_stat_item { MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, MTHP_STAT_ZSWPOUT, MTHP_STAT_SWPIN, + MTHP_STAT_SWPIN_FALLBACK, + MTHP_STAT_SWPIN_FALLBACK_CHARGE, MTHP_STAT_SWPOUT, MTHP_STAT_SWPOUT_FALLBACK, MTHP_STAT_SHMEM_ALLOC, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index ae4fe8615bb6..ec8c0ccc8f95 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -153,11 +153,11 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to, vm_flags_t vm_flags); long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed);
-bool isolate_hugetlb(struct folio *folio, struct list_head *list); +bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list); int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison); int get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared); -void folio_putback_active_hugetlb(struct folio *folio); +void folio_putback_hugetlb(struct folio *folio); void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason); void hugetlb_fix_reserve_counts(struct inode *inode); extern struct mutex *hugetlb_fault_mutex_table; @@ -414,7 +414,7 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, return NULL; } -static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list) +static inline bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list) { return false; } @@ -430,7 +430,7 @@ static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags, return 0; } -static inline void folio_putback_active_hugetlb(struct folio *folio) +static inline void folio_putback_hugetlb(struct folio *folio) { } @@ -681,8 +681,9 @@ struct huge_bootmem_page { }; int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); +int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn); struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, - unsigned long addr, int avoid_reserve); + unsigned long addr, bool cow_from_owner); struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback); @@ -1059,9 +1060,15 @@ static inline int isolate_or_dissolve_huge_page(struct page *page, return -ENOMEM; } +static inline int replace_free_hugepage_folios(unsigned long start_pfn, + unsigned long end_pfn) +{ + return 0; +} + static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, - int avoid_reserve) + bool cow_from_owner) { return NULL; } diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 02a226bcf0ed..4179add2864b 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -24,7 +24,7 @@ #include <linux/mod_devicetable.h> #include <linux/interrupt.h> #include <linux/reciprocal_div.h> -#include <asm/hyperv-tlfs.h> +#include <hyperv/hvhdk.h> #define MAX_PAGE_BUFFER_COUNT 32 #define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */ @@ -768,15 +768,6 @@ struct vmbus_close_msg { struct vmbus_channel_close_channel msg; }; -/* Define connection identifier type. */ -union hv_connection_id { - u32 asu32; - struct { - u32 id:24; - u32 reserved:8; - } u; -}; - enum vmbus_device_type { HV_IDE = 0, HV_SCSI, diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 388ce71a29a9..c31fd1dba3bd 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -244,7 +244,6 @@ enum i2c_driver_flags { * @id_table: List of I2C devices supported by this driver * @detect: Callback for device detection * @address_list: The I2C addresses to probe (for detect) - * @clients: List of detected clients we created (for i2c-core use only) * @flags: A bitmask of flags defined in &enum i2c_driver_flags * * The driver.owner field should be set to the module owner of this driver. 
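With the per-driver clients list gone, a driver carries no bookkeeping of its own for the clients it detects; the core marks such clients with flags instead (see the I2C_CLIENT_AUTO and I2C_CLIENT_USER additions further down). A minimal registration sketch, assuming only standard I2C core APIs (foo_probe and foo_ids are hypothetical):

	static struct i2c_driver foo_driver = {
		.driver = {
			.name	= "foo",
		},
		.probe		= foo_probe,
		.id_table	= foo_ids,
		/* no .clients list left to initialize */
	};
	module_i2c_driver(foo_driver);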
@@ -299,7 +298,6 @@ struct i2c_driver { /* Device detection callback for automatic device creation */ int (*detect)(struct i2c_client *client, struct i2c_board_info *info); const unsigned short *address_list; - struct list_head clients; u32 flags; }; @@ -315,12 +313,12 @@ struct i2c_driver { * @dev: Driver model device node for the slave. * @init_irq: IRQ that was set at initialization * @irq: indicates the IRQ generated by this device (if any) - * @detected: member of an i2c_driver.clients list or i2c-core's - * userspace_devices list * @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter * calls it to pass on slave events to the slave driver. * @devres_group_id: id of the devres group that will be created for resources * acquired when probing this device. + * @debugfs: pointer to the debugfs subdirectory which the I2C core created + * for this client. * * An i2c_client identifies a single device (i.e. chip) connected to an * i2c bus. The behaviour exposed to Linux is defined by the driver @@ -334,6 +332,8 @@ struct i2c_client { #define I2C_CLIENT_SLAVE 0x20 /* we are the slave */ #define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */ #define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ +#define I2C_CLIENT_AUTO 0x100 /* client was auto-detected */ +#define I2C_CLIENT_USER 0x200 /* client was userspace-created */ #define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ /* Must match I2C_M_STOP|IGNORE_NAK */ @@ -345,11 +345,11 @@ struct i2c_client { struct device dev; /* the device structure */ int init_irq; /* irq set at initialization */ int irq; /* irq issued by device */ - struct list_head detected; #if IS_ENABLED(CONFIG_I2C_SLAVE) i2c_slave_cb_t slave_cb; /* callback for slave mode */ #endif void *devres_group_id; /* ID of probe devres group */ + struct dentry *debugfs; /* per-client debugfs dir */ }; #define to_i2c_client(d) container_of(d, struct i2c_client, dev) @@ -751,9 +751,6 @@ struct i2c_adapter { char name[48]; struct completion dev_released; - struct mutex userspace_clients_lock; - struct list_head userspace_clients; - struct i2c_bus_recovery_info *bus_recovery_info; const struct i2c_adapter_quirks *quirks; diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h index 0a8a44ac2f02..b674f64d0822 100644 --- a/include/linux/i3c/device.h +++ b/include/linux/i3c/device.h @@ -283,7 +283,7 @@ static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv, * module_i3c_i2c_driver() - Register a module providing an I3C and an I2C * driver * @__i3cdrv: the I3C driver to register - * @__i2cdrv: the I3C driver to register + * @__i2cdrv: the I2C driver to register * * Provide generic init/exit functions that simply register/unregister an I3C * and an I2C driver. diff --git a/include/linux/i8042.h b/include/linux/i8042.h index 95b07f8b77fe..00037c13abc8 100644 --- a/include/linux/i8042.h +++ b/include/linux/i8042.h @@ -54,15 +54,29 @@ struct serio; +/** + * typedef i8042_filter_t - i8042 filter callback + * @data: Data received by the i8042 controller + * @str: Status register of the i8042 controller + * @serio: Serio of the i8042 controller + * @context: Context pointer associated with this callback + * + * This represents an i8042 filter callback which can be used with i8042_install_filter() + * and i8042_remove_filter() to filter the i8042 input for platform-specific key codes. + * + * Context: Interrupt context. + * Returns: true if the data should be filtered out, false otherwise.
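 *
 * A minimal sketch of such a filter (the EC device, work item and scancode
 * are hypothetical):
 *
 *	static bool fancy_ec_filter(unsigned char data, unsigned char str,
 *				    struct serio *serio, void *context)
 *	{
 *		struct fancy_ec *ec = context;
 *
 *		if (data != FANCY_HOTKEY_SCANCODE)
 *			return false;
 *		schedule_work(&ec->hotkey_work);
 *		return true;
 *	}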
+ */ +typedef bool (*i8042_filter_t)(unsigned char data, unsigned char str, struct serio *serio, + void *context); + #if defined(CONFIG_SERIO_I8042) || defined(CONFIG_SERIO_I8042_MODULE) void i8042_lock_chip(void); void i8042_unlock_chip(void); int i8042_command(unsigned char *param, int command); -int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, - struct serio *serio)); -int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, - struct serio *serio)); +int i8042_install_filter(i8042_filter_t filter, void *context); +int i8042_remove_filter(i8042_filter_t filter); #else @@ -79,14 +93,12 @@ static inline int i8042_command(unsigned char *param, int command) return -ENODEV; } -static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, - struct serio *serio)) +static inline int i8042_install_filter(i8042_filter_t filter, void *context) { return -ENODEV; } -static inline int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, - struct serio *serio)) +static inline int i8042_remove_filter(i8042_filter_t filter) { return -ENODEV; } diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 05dedc45505c..16741e542e81 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1532,6 +1532,17 @@ struct ieee80211_mgmt { struct { u8 action_code; } __packed ttlm_tear_down; + struct { + u8 action_code; + u8 dialog_token; + u8 variable[]; + } __packed ml_reconf_req; + struct { + u8 action_code; + u8 dialog_token; + u8 count; + u8 variable[]; + } __packed ml_reconf_resp; } u; } __packed action; DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */ @@ -1542,11 +1553,13 @@ struct ieee80211_mgmt { #define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 #define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126 #define BSS_MEMBERSHIP_SELECTOR_GLK 125 -#define BSS_MEMBERSHIP_SELECTOR_EPS 124 +#define BSS_MEMBERSHIP_SELECTOR_EPD 124 #define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123 #define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122 #define BSS_MEMBERSHIP_SELECTOR_EHT_PHY 121 +#define BSS_MEMBERSHIP_SELECTOR_MIN BSS_MEMBERSHIP_SELECTOR_EHT_PHY + /* mgmt header + 1 byte category code */ #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) @@ -3883,6 +3896,16 @@ enum ieee80211_protected_eht_actioncode { WLAN_PROTECTED_EHT_ACTION_TTLM_REQ = 0, WLAN_PROTECTED_EHT_ACTION_TTLM_RES = 1, WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN = 2, + WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_REQ = 3, + WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_RESP = 4, + WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_TEARDOWN = 5, + WLAN_PROTECTED_EHT_ACTION_EML_OP_MODE_NOTIF = 6, + WLAN_PROTECTED_EHT_ACTION_LINK_RECOMMEND = 7, + WLAN_PROTECTED_EHT_ACTION_ML_OP_UPDATE_REQ = 8, + WLAN_PROTECTED_EHT_ACTION_ML_OP_UPDATE_RESP = 9, + WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_NOTIF = 10, + WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_REQ = 11, + WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP = 12, }; /* Security key length */ @@ -4961,6 +4984,7 @@ struct ieee80211_multi_link_elem { #define IEEE80211_MLC_BASIC_PRES_EML_CAPA 0x0080 #define IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP 0x0100 #define IEEE80211_MLC_BASIC_PRES_MLD_ID 0x0200 +#define IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP 0x0400 #define IEEE80211_MED_SYNC_DELAY_DURATION 0x00ff #define IEEE80211_MED_SYNC_DELAY_SYNC_OFDM_ED_THRESH 0x0f00 @@ -5018,6 +5042,8 @@ struct ieee80211_multi_link_elem { #define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_DIFF 3 #define IEEE80211_MLD_CAP_OP_FREQ_SEP_TYPE_IND 0x0f80 
#define IEEE80211_MLD_CAP_OP_AAR_SUPPORT 0x1000 +#define IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT 0x2000 +#define IEEE80211_MLD_CAP_OP_ALIGNED_TWT_SUPPORT 0x4000 struct ieee80211_mle_basic_common_info { u8 len; @@ -5033,6 +5059,9 @@ struct ieee80211_mle_preq_common_info { } __packed; #define IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR 0x0010 +#define IEEE80211_MLC_RECONF_PRES_EML_CAPA 0x0020 +#define IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP 0x0040 +#define IEEE80211_MLC_RECONF_PRES_EXT_MLD_CAPA_OP 0x0080 /* no fixed fields in RECONF */ @@ -5055,28 +5084,24 @@ static inline u8 ieee80211_mle_common_size(const u8 *data) { const struct ieee80211_multi_link_elem *mle = (const void *)data; u16 control = le16_to_cpu(mle->control); - u8 common = 0; switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) { case IEEE80211_ML_CONTROL_TYPE_BASIC: case IEEE80211_ML_CONTROL_TYPE_PREQ: case IEEE80211_ML_CONTROL_TYPE_TDLS: case IEEE80211_ML_CONTROL_TYPE_RECONF: + case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS: /* * The length is the first octet pointed by mle->variable so no * need to add anything */ break; - case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS: - if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR) - common += ETH_ALEN; - return common; default: WARN_ON(1); return 0; } - return sizeof(*mle) + common + mle->variable[0]; + return sizeof(*mle) + mle->variable[0]; } /** @@ -5227,6 +5252,47 @@ static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data) } /** + * ieee80211_mle_get_ext_mld_capa_op - returns the extended MLD capabilities + * and operations. + * @data: pointer to the multi-link element + * Return: the extended MLD capabilities and operations field value from + * the multi-link element, or 0 if not present + * + * The element is assumed to be of the correct type (BASIC) and big enough, + * this must be checked using ieee80211_mle_type_ok(). 
+ */ +static inline u16 ieee80211_mle_get_ext_mld_capa_op(const u8 *data) +{ + const struct ieee80211_multi_link_elem *mle = (const void *)data; + u16 control = le16_to_cpu(mle->control); + const u8 *common = mle->variable; + + /* + * common points now at the beginning of + * ieee80211_mle_basic_common_info + */ + common += sizeof(struct ieee80211_mle_basic_common_info); + + if (!(control & IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP)) + return 0; + + if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID) + common += 1; + if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT) + common += 1; + if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY) + common += 2; + if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA) + common += 2; + if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP) + common += 2; + if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID) + common += 1; + + return get_unaligned_le16(common); +} + +/** * ieee80211_mle_get_mld_id - returns the MLD ID * @data: pointer to the multi-link element * Return: The MLD ID in the given multi-link element, or 0 if not present @@ -5298,6 +5364,8 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len) common += 2; if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID) common += 1; + if (control & IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP) + common += 2; break; case IEEE80211_ML_CONTROL_TYPE_PREQ: common += sizeof(struct ieee80211_mle_preq_common_info); @@ -5308,14 +5376,19 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len) case IEEE80211_ML_CONTROL_TYPE_RECONF: if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR) common += ETH_ALEN; + if (control & IEEE80211_MLC_RECONF_PRES_EML_CAPA) + common += 2; + if (control & IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP) + common += 2; + if (control & IEEE80211_MLC_RECONF_PRES_EXT_MLD_CAPA_OP) + common += 2; break; case IEEE80211_ML_CONTROL_TYPE_TDLS: common += sizeof(struct ieee80211_mle_tdls_common_info); check_common_len = true; break; case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS: - if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR) - common += ETH_ALEN; + common = ETH_ALEN + 1; break; default: /* we don't know this type */ @@ -5458,8 +5531,13 @@ ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(const struct ieee80211_mle_per_sta #define IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE 0x0010 #define IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT 0x0020 #define IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT 0x0040 -#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_UPDATE_TYPE 0x0780 -#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT 0x0800 +#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE 0x0780 +#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_AP_REM 0 +#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_OP_PARAM_UPDATE 1 +#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_ADD_LINK 2 +#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_DEL_LINK 3 +#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_NSTR_STATUS 4 +#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT 0x0800 /** * ieee80211_mle_reconf_sta_prof_size_ok - validate reconfiguration multi-link diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h index 0404f5bf4f30..d7941fd88032 100644 --- a/include/linux/if_hsr.h +++ b/include/linux/if_hsr.h @@ -13,6 +13,15 @@ enum hsr_version { PRP_V1, }; +enum hsr_port_type { + HSR_PT_NONE = 0, /* Must be 0, used by framereg */ + HSR_PT_SLAVE_A, + HSR_PT_SLAVE_B, + HSR_PT_INTERLINK, + HSR_PT_MASTER, + 
HSR_PT_PORTS, /* This must be the last item in the enum */ +}; + /* HSR Tag. * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB, * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest, @@ -32,6 +41,8 @@ struct hsr_tag { #if IS_ENABLED(CONFIG_HSR) extern bool is_hsr_master(struct net_device *dev); extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver); +struct net_device *hsr_get_port_ndev(struct net_device *ndev, + enum hsr_port_type pt); #else static inline bool is_hsr_master(struct net_device *dev) { @@ -42,6 +53,12 @@ static inline int hsr_get_version(struct net_device *dev, { return -EINVAL; } + +static inline struct net_device *hsr_get_port_ndev(struct net_device *ndev, + enum hsr_port_type pt) +{ + return ERR_PTR(-EINVAL); +} #endif /* CONFIG_HSR */ #endif /*_LINUX_IF_HSR_H_*/ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index d65b5d71b93b..38456b42cdb5 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -176,6 +176,7 @@ struct netpoll; * @real_dev_addr: address of underlying netdevice * @dent: proc dir entry * @vlan_pcpu_stats: ptr to percpu rx stats + * @netpoll: netpoll instance "propagated" down to @real_dev */ struct vlan_dev_priv { unsigned int nr_ingress_mappings; @@ -310,7 +311,7 @@ static inline bool vlan_uses_dev(const struct net_device *dev) * eth_type_vlan - check for valid vlan ether type. * @ethertype: ether type to check * - * Returns true if the ether type is a vlan ether type. + * Returns: true if the ether type is a vlan ether type. */ static inline bool eth_type_vlan(__be16 ethertype) { @@ -341,9 +342,9 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features, * @mac_len: MAC header length including outer vlan headers * * Inserts the VLAN tag into @skb as part of the payload at offset mac_len - * Returns error if skb_cow_head fails. - * * Does not change skb->protocol so this function can be used during receive. + * + * Returns: error if skb_cow_head fails. */ static inline int __vlan_insert_inner_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci, @@ -390,9 +391,9 @@ static inline int __vlan_insert_inner_tag(struct sk_buff *skb, * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload - * Returns error if skb_cow_head fails. - * * Does not change skb->protocol so this function can be used during receive. + * + * Returns: error if skb_cow_head fails. */ static inline int __vlan_insert_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) @@ -414,6 +415,8 @@ static inline int __vlan_insert_tag(struct sk_buff *skb, * doesn't have to worry about freeing the original skb. * * Does not change skb->protocol so this function can be used during receive. + * + * Return: modified @skb on success, NULL on error (@skb is freed). */ static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb, __be16 vlan_proto, @@ -443,6 +446,8 @@ static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb, * doesn't have to worry about freeing the original skb. * * Does not change skb->protocol so this function can be used during receive. + * + * Return: modified @skb on success, NULL on error (@skb is freed). 
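 *
 * A minimal usage sketch (hypothetical transmit path; @tci already built):
 *
 *	skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), tci);
 *	if (unlikely(!skb))
 *		return -ENOMEM;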
*/ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) @@ -461,6 +466,8 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, * * Following the skb_unshare() example, in case of error, the calling function * doesn't have to worry about freeing the original skb. + * + * Return: modified @skb on success, NULL on error (@skb is freed). */ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, __be16 vlan_proto, @@ -533,7 +540,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, * @skb: skbuff to query * @vlan_tci: buffer to store value * - * Returns error if the skb is not of VLAN type + * Returns: error if the skb is not of VLAN type */ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) { @@ -551,7 +558,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) * @skb: skbuff to query * @vlan_tci: buffer to store value * - * Returns error if @skb->vlan_tci is not set correctly + * Returns: error if @skb->vlan_tci is not set correctly */ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, u16 *vlan_tci) @@ -570,7 +577,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, * @skb: skbuff to query * @vlan_tci: buffer to store value * - * Returns error if the skb is not VLAN tagged + * Returns: error if the skb is not VLAN tagged */ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) { @@ -582,13 +589,13 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) } /** - * vlan_get_protocol - get protocol EtherType. + * __vlan_get_protocol_offset() - get protocol EtherType. * @skb: skbuff to query * @type: first vlan protocol * @mac_offset: MAC offset * @depth: buffer to store length of eth and vlan tags in bytes * - * Returns the EtherType of the packet, regardless of whether it is + * Returns: the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. */ static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb, @@ -639,7 +646,7 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, * vlan_get_protocol - get protocol EtherType. * @skb: skbuff to query * - * Returns the EtherType of the packet, regardless of whether it is + * Returns: the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. */ static inline __be16 vlan_get_protocol(const struct sk_buff *skb) @@ -720,7 +727,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb, * Expects the skb to contain a VLAN tag in the payload, and to have skb->data * pointing at the MAC header. * - * Returns a new pointer to skb->data, or NULL on failure to pull. + * Returns: a new pointer to skb->data, or NULL on failure to pull. */ static inline void *vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci) { @@ -737,7 +744,7 @@ static inline void *vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci) * skb_vlan_tagged - check if skb is vlan tagged. * @skb: skbuff to query * - * Returns true if the skb is tagged, regardless of whether it is hardware + * Returns: true if the skb is tagged, regardless of whether it is hardware * accelerated or not. */ static inline bool skb_vlan_tagged(const struct sk_buff *skb) @@ -753,7 +760,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb) * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers. 
* @skb: skbuff to query * - * Returns true if the skb is tagged with multiple vlan headers, regardless + * Returns: true if the skb is tagged with multiple vlan headers, regardless * of whether it is hardware accelerated or not. */ static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) @@ -784,7 +791,7 @@ static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) * @skb: skbuff to query * @features: features to be checked * - * Returns features without unsafe ones if the skb has multiple tags. + * Returns: features without unsafe ones if the skb has multiple tags. */ static inline netdev_features_t vlan_features_check(struct sk_buff *skb, netdev_features_t features) @@ -808,9 +815,11 @@ static inline netdev_features_t vlan_features_check(struct sk_buff *skb, * @h1: Pointer to vlan header * @h2: Pointer to vlan header * - * Compare two vlan headers, returns 0 if equal. + * Compare two vlan headers. * * Please note that alignment of h1 & h2 are only guaranteed to be 16 bits. + * + * Return: 0 if equal, arbitrary non-zero value if not equal. */ static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1, const struct vlan_hdr *h2) diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 5171231f70a8..073b30a9b850 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -87,6 +87,8 @@ struct ip_mc_list { char loaded; unsigned char gsquery; /* check source marks? */ unsigned char crcount; + unsigned long mca_cstamp; + unsigned long mca_tstamp; struct rcu_head rcu; }; diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index f8c1d2505940..417073c52380 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -29,6 +29,7 @@ struct ad_sd_calib_data { struct ad_sigma_delta; struct device; +struct gpio_desc; struct iio_dev; /** @@ -53,6 +54,7 @@ struct iio_dev; * @irq_flags: flags for the interrupt used by the triggered buffer * @num_slots: Number of sequencer slots * @irq_line: IRQ for reading conversions. If 0, spi->irq will be used + * @num_resetclks: Number of SPI clk cycles with MOSI=1 to reset the chip. 
*/ struct ad_sigma_delta_info { int (*set_channel)(struct ad_sigma_delta *, unsigned int channel); @@ -69,6 +71,7 @@ struct ad_sigma_delta_info { unsigned long irq_flags; unsigned int num_slots; int irq_line; + unsigned int num_resetclks; }; /** @@ -85,6 +88,7 @@ struct ad_sigma_delta { /* private: */ struct completion completion; + spinlock_t irq_lock; /* protects .irq_dis and irq en/disable state */ bool irq_dis; bool bus_locked; @@ -96,7 +100,8 @@ struct ad_sigma_delta { unsigned int active_slots; unsigned int current_slot; unsigned int num_slots; - int irq_line; + struct gpio_desc *rdy_gpiod; + int irq_line; bool status_appended; /* map slots to channels in order to know what to expect from devices */ unsigned int *slots; @@ -178,8 +183,7 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, unsigned int size, unsigned int *val); -int ad_sd_reset(struct ad_sigma_delta *sigma_delta, - unsigned int reset_length); +int ad_sd_reset(struct ad_sigma_delta *sigma_delta); int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, int *val); diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 418b1307d3f2..3b8d618bb3df 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -37,7 +37,7 @@ int iio_pop_from_buffer(struct iio_buffer *buffer, void *data); static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev, void *data, int64_t timestamp) { - if (indio_dev->scan_timestamp) { + if (ACCESS_PRIVATE(indio_dev, scan_timestamp)) { size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1; ((int64_t *)data)[ts_offset] = timestamp; } diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index 333d1d8ccb37..6a4479616479 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -418,7 +418,7 @@ unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan); * @chan: The channel being queried. * @attr: The ext_info attribute to read. * @buf: Where to store the attribute value. Assumed to hold - * at least PAGE_SIZE bytes. + * at least PAGE_SIZE bytes and to be aligned at PAGE_SIZE. * * Returns the number of bytes written to buf (perhaps w/o zero termination; * it need not even be a string), or an error code. @@ -445,7 +445,7 @@ ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr, * iio_read_channel_label() - read label for a given channel * @chan: The channel being queried. * @buf: Where to store the attribute value. Assumed to hold - * at least PAGE_SIZE bytes. + * at least PAGE_SIZE bytes and to be aligned at PAGE_SIZE. * * Returns the number of bytes written to buf, or an error code. 
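 *
 * A minimal usage sketch (a sysfs show() handler is the typical caller, so
 * @buf is a PAGE_SIZE buffer; chan is assumed to be a valid channel):
 *
 *	ssize_t len = iio_read_channel_label(chan, buf);
 *	if (len < 0)
 *		return len;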
 */ diff --git a/include/linux/iio/iio-gts-helper.h b/include/linux/iio/iio-gts-helper.h index 9cb6c80dea71..e5de7a124bad 100644 --- a/include/linux/iio/iio-gts-helper.h +++ b/include/linux/iio/iio-gts-helper.h @@ -188,6 +188,9 @@ int iio_gts_total_gain_to_scale(struct iio_gts *gts, int total_gain, int iio_gts_find_gain_sel_for_scale_using_time(struct iio_gts *gts, int time_sel, int scale_int, int scale_nano, int *gain_sel); +int iio_gts_find_gain_time_sel_for_scale(struct iio_gts *gts, int scale_int, + int scale_nano, int *gain_sel, + int *time_sel); int iio_gts_get_scale(struct iio_gts *gts, int gain, int time, int *scale_int, int *scale_nano); int iio_gts_find_new_gain_sel_by_old_gain_time(struct iio_gts *gts, @@ -196,6 +199,9 @@ int iio_gts_find_new_gain_sel_by_old_gain_time(struct iio_gts *gts, int iio_gts_find_new_gain_by_old_gain_time(struct iio_gts *gts, int old_gain, int old_time, int new_time, int *new_gain); +int iio_gts_find_new_gain_by_gain_time_min(struct iio_gts *gts, int old_gain, + int old_time, int new_time, + int *new_gain, bool *in_range); int iio_gts_avail_times(struct iio_gts *gts, const int **vals, int *type, int *length); int iio_gts_all_avail_scales(struct iio_gts *gts, const int **vals, int *type, diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h index a89e7e43e441..4247497f3f8b 100644 --- a/include/linux/iio/iio-opaque.h +++ b/include/linux/iio/iio-opaque.h @@ -28,7 +28,7 @@ * @groupcounter: index of next attribute group * @legacy_scan_el_group: attribute group for legacy scan elements attribute group * @legacy_buffer_group: attribute group for legacy buffer attributes group - * @bounce_buffer: for devices that call iio_push_to_buffers_with_timestamp_unaligned() + * @bounce_buffer: for devices that call iio_push_to_buffers_with_ts_unaligned() * @bounce_buffer_size: size of the currently allocated bounce buffer * @scan_index_timestamp: cache of the index to the timestamp * @clock_id: timestamping clock posix identifier diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index ae65890d4567..56161e02f002 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -611,7 +611,7 @@ struct iio_dev { const unsigned long *available_scan_masks; unsigned int __private masklength; const unsigned long *active_scan_mask; - bool scan_timestamp; + bool __private scan_timestamp; struct iio_trigger *trig; struct iio_poll_func *pollfunc; struct iio_poll_func *pollfunc_event; diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index e6a75356567a..4bb98d9731de 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -99,7 +99,6 @@ struct adis_data { * @spi: Reference to SPI device which owns this ADIS IIO device * @trig: IIO trigger object data * @data: ADIS chip variant specific data - * @burst: ADIS burst transfer information * @burst_extra_len: Burst extra length. Should only be used by devices that can * dynamically change their burst mode length.
* @state_lock: Lock used by the device to protect state diff --git a/include/linux/iio/timer/stm32-timer-trigger.h b/include/linux/iio/timer/stm32-timer-trigger.h index 37572e4dc73a..1ee237b56183 100644 --- a/include/linux/iio/timer/stm32-timer-trigger.h +++ b/include/linux/iio/timer/stm32-timer-trigger.h @@ -72,6 +72,12 @@ #define TIM17_OC1 "tim17_oc1" +#define TIM20_OC1 "tim20_oc1" +#define TIM20_OC2 "tim20_oc2" +#define TIM20_OC3 "tim20_oc3" +#define TIM20_TRGO "tim20_trgo" +#define TIM20_TRGO2 "tim20_trgo2" + #if IS_REACHABLE(CONFIG_IIO_STM32_TIMER_TRIGGER) bool is_stm32_timer_trigger(struct iio_trigger *trig); #else diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h index bc7babe91b2e..bf675a8aef8a 100644 --- a/include/linux/instrumentation.h +++ b/include/linux/instrumentation.h @@ -4,14 +4,14 @@ #ifdef CONFIG_NOINSTR_VALIDATION +#include <linux/objtool.h> #include <linux/stringify.h> /* Begin/end of an instrumentation safe region */ #define __instrumentation_begin(c) ({ \ asm volatile(__stringify(c) ": nop\n\t" \ - ".pushsection .discard.instr_begin\n\t" \ - ".long " __stringify(c) "b - .\n\t" \ - ".popsection\n\t" : : "i" (c)); \ + ANNOTATE_INSTR_BEGIN(__ASM_BREF(c)) \ + : : "i" (c)); \ }) #define instrumentation_begin() __instrumentation_begin(__COUNTER__) @@ -48,9 +48,8 @@ */ #define __instrumentation_end(c) ({ \ asm volatile(__stringify(c) ": nop\n\t" \ - ".pushsection .discard.instr_end\n\t" \ - ".long " __stringify(c) "b - .\n\t" \ - ".popsection\n\t" : : "i" (c)); \ + ANNOTATE_INSTR_END(__ASM_BREF(c)) \ + : : "i" (c)); \ }) #define instrumentation_end() __instrumentation_end(__COUNTER__) #else /* !CONFIG_NOINSTR_VALIDATION */ diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h index 771622650247..dfbf7d9d7bb5 100644 --- a/include/linux/intel-ish-client-if.h +++ b/include/linux/intel-ish-client-if.h @@ -100,7 +100,6 @@ void ishtp_cl_destroy_connection(struct ishtp_cl *cl, bool reset); int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length); int ishtp_cl_flush_queues(struct ishtp_cl *cl); int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb); -bool ishtp_cl_tx_empty(struct ishtp_cl *cl); struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl); void *ishtp_get_client_data(struct ishtp_cl *cl); void ishtp_set_client_data(struct ishtp_cl *cl, void *data); diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index ce86b09ae80f..bba2a51c87d2 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -181,11 +181,21 @@ struct io_pgtable_cfg { }; /** + * struct arm_lpae_io_pgtable_walk_data - information from a pgtable walk + * + * @ptes: The recorded PTE values from the walk + */ +struct arm_lpae_io_pgtable_walk_data { + u64 ptes[4]; +}; + +/** * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers. * * @map_pages: Map a physically contiguous range of pages of the same size. * @unmap_pages: Unmap a range of virtually contiguous pages of the same size. * @iova_to_phys: Translate iova to physical address. + * @pgtable_walk: (optional) Perform a page table walk for a given iova. * * These functions map directly onto the iommu_ops member functions with * the same names. 
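Because @pgtable_walk is optional and the layout of its walk data is format specific, a caller must know which page-table format it is interrogating. A minimal sketch against the ARM LPAE walk data declared above (the dump helper itself is hypothetical):

	static void dump_lpae_walk(struct io_pgtable_ops *ops, unsigned long iova)
	{
		struct arm_lpae_io_pgtable_walk_data wd = {};
		int i;

		if (!ops->pgtable_walk || ops->pgtable_walk(ops, iova, &wd))
			return;		/* op not implemented, or the walk failed */

		for (i = 0; i < ARRAY_SIZE(wd.ptes); i++)
			pr_info("level %d pte: %#llx\n", i, wd.ptes[i]);
	}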
@@ -199,6 +209,7 @@ struct io_pgtable_ops { struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); + int (*pgtable_walk)(struct io_pgtable_ops *ops, unsigned long iova, void *wd); int (*read_and_clear_dirty)(struct io_pgtable_ops *ops, unsigned long iova, size_t size, unsigned long flags, diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index fd4cdb0860a2..623d8e798a11 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -78,8 +78,9 @@ struct io_hash_table { struct io_mapped_region { struct page **pages; - void *vmap_ptr; - size_t nr_pages; + void *ptr; + unsigned nr_pages; + unsigned flags; }; /* @@ -293,6 +294,11 @@ struct io_ring_ctx { struct io_submit_state submit_state; + /* + * Modifications are protected by ->uring_lock and ->mmap_lock. + * The flags, buf_pages and buf_nr_pages fields should be stable + * once published. + */ struct xarray io_bl_xa; struct io_hash_table cancel_table; @@ -424,17 +430,10 @@ struct io_ring_ctx { * side will need to grab this lock, to prevent either side from * being run concurrently with the other. */ - struct mutex resize_lock; - - /* - * If IORING_SETUP_NO_MMAP is used, then the below holds - * the gup'ed pages for the two rings, and the sqes. - */ - unsigned short n_ring_pages; - unsigned short n_sqe_pages; - struct page **ring_pages; - struct page **sqe_pages; + struct mutex mmap_lock; + struct io_mapped_region sq_region; + struct io_mapped_region ring_region; /* used for optimised request parameter and wait argument passing */ struct io_mapped_region param_region; }; @@ -481,6 +480,7 @@ enum { REQ_F_BL_NO_RECYCLE_BIT, REQ_F_BUFFERS_COMMIT_BIT, REQ_F_BUF_NODE_BIT, + REQ_F_HAS_METADATA_BIT, /* not a real bit, just to check we're not overflowing the space */ __REQ_F_LAST_BIT, @@ -561,6 +561,8 @@ enum { REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT), /* buf node is valid */ REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT), + /* request has read/write metadata assigned */ + REQ_F_HAS_METADATA = IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT), }; typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 318d27841130..38c65e92ecd0 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -587,9 +587,6 @@ iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size, * - IOMMU_DOMAIN_DMA: must use a dma domain * - 0: use the default setting * @default_domain_ops: the default ops for domains - * @remove_dev_pasid: Remove any translation configurations of a specific - * pasid, so that any DMA transactions with this pasid - * will be blocked by the hardware. * @viommu_alloc: Allocate an iommufd_viommu on a physical IOMMU instance behind * the @dev, as the set of virtualization resources shared/passed * to user space IOMMU instance. 
And associate it with a nesting @@ -647,8 +644,6 @@ struct iommu_ops { struct iommu_page_response *msg); int (*def_domain_type)(struct device *dev); - void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid, - struct iommu_domain *domain); struct iommufd_viommu *(*viommu_alloc)( struct device *dev, struct iommu_domain *parent_domain, diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index a1c9c0d48ebf..2f74dd90c271 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -126,7 +126,7 @@ int ipmi_create_user(unsigned int if_num, * the users before you destroy the callback structures, it should be * safe, too. */ -int ipmi_destroy_user(struct ipmi_user *user); +void ipmi_destroy_user(struct ipmi_user *user); /* Get the IPMI version of the BMC we are talking to. */ int ipmi_get_version(struct ipmi_user *user, diff --git a/include/linux/irq.h b/include/linux/irq.h index fa711f80957b..8daa17f0107a 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -64,7 +64,6 @@ enum irqchip_irq_state; * IRQ_NOAUTOEN - Interrupt is not automatically enabled in * request/setup_irq() * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) - * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context * IRQ_NESTED_THREAD - Interrupt nests into another thread * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable * IRQ_IS_POLLED - Always polled by another interrupt. Exclude @@ -93,7 +92,6 @@ enum { IRQ_NOREQUEST = (1 << 11), IRQ_NOAUTOEN = (1 << 12), IRQ_NO_BALANCING = (1 << 13), - IRQ_MOVE_PCNTXT = (1 << 14), IRQ_NESTED_THREAD = (1 << 15), IRQ_NOTHREAD = (1 << 16), IRQ_PER_CPU_DEVID = (1 << 17), @@ -105,7 +103,7 @@ enum { #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ - IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ + IRQ_NOAUTOEN | IRQ_LEVEL | IRQ_NO_BALANCING | \ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN) @@ -201,8 +199,6 @@ struct irq_data { * IRQD_LEVEL - Interrupt is level triggered * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup * from suspend - * IRQD_MOVE_PCNTXT - Interrupt can be moved in process - * context * IRQD_IRQ_DISABLED - Disabled state of the interrupt * IRQD_IRQ_MASKED - Masked state of the interrupt * IRQD_IRQ_INPROGRESS - In progress state of the interrupt @@ -233,7 +229,6 @@ enum { IRQD_AFFINITY_SET = BIT(12), IRQD_LEVEL = BIT(13), IRQD_WAKEUP_STATE = BIT(14), - IRQD_MOVE_PCNTXT = BIT(15), IRQD_IRQ_DISABLED = BIT(16), IRQD_IRQ_MASKED = BIT(17), IRQD_IRQ_INPROGRESS = BIT(18), @@ -338,11 +333,6 @@ static inline bool irqd_is_wakeup_set(struct irq_data *d) return __irqd_to_state(d) & IRQD_WAKEUP_STATE; } -static inline bool irqd_can_move_in_process_context(struct irq_data *d) -{ - return __irqd_to_state(d) & IRQD_MOVE_PCNTXT; -} - static inline bool irqd_irq_disabled(struct irq_data *d) { return __irqd_to_state(d) & IRQD_IRQ_DISABLED; @@ -567,6 +557,7 @@ struct irq_chip { * in the suspend path if they are in disabled state * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup * IRQCHIP_IMMUTABLE: Don't ever change anything in this chip + * IRQCHIP_MOVE_DEFERRED: Move the interrupt in actual interrupt context */ enum { IRQCHIP_SET_TYPE_MASKED = (1 << 0), @@ -581,6 +572,7 @@ enum { IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9), IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10), IRQCHIP_IMMUTABLE = (1 << 11), + IRQCHIP_MOVE_DEFERRED = (1 << 12), }; #include <linux/irqdesc.h> @@ -694,6 +686,9 @@ extern int 
irq_chip_request_resources_parent(struct irq_data *data); extern void irq_chip_release_resources_parent(struct irq_data *data); #endif +/* Disable or mask interrupts during a kernel kexec */ +extern void machine_kexec_mask_interrupts(void); + /* Handling of unhandled and spurious interrupts: */ extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 50f7ea8714bf..561025b4f3d9 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -28,7 +28,7 @@ #include <linux/slab.h> #include <linux/bit_spinlock.h> #include <linux/blkdev.h> -#include <crypto/hash.h> +#include <linux/crc32c.h> #endif #define journal_oom_retry 1 @@ -1242,13 +1242,6 @@ struct journal_s void *j_private; /** - * @j_chksum_driver: - * - * Reference to checksum algorithm driver via cryptoapi. - */ - struct crypto_shash *j_chksum_driver; - - /** * @j_csum_seed: * * Precomputed journal UUID checksum for seeding other checksums. @@ -1750,10 +1743,7 @@ static inline bool jbd2_journal_has_csum_v2or3_feature(journal_t *j) static inline int jbd2_journal_has_csum_v2or3(journal_t *journal) { - WARN_ON_ONCE(jbd2_journal_has_csum_v2or3_feature(journal) && - journal->j_chksum_driver == NULL); - - return journal->j_chksum_driver != NULL; + return jbd2_journal_has_csum_v2or3_feature(journal); } static inline int jbd2_journal_get_num_fc_blks(journal_superblock_t *jsb) @@ -1790,27 +1780,10 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal) #define BJ_Reserved 4 /* Buffer is reserved for access by journal */ #define BJ_Types 5 -/* JBD uses a CRC32 checksum */ -#define JBD_MAX_CHECKSUM_SIZE 4 - static inline u32 jbd2_chksum(journal_t *journal, u32 crc, const void *address, unsigned int length) { - DEFINE_RAW_FLEX(struct shash_desc, desc, __ctx, - DIV_ROUND_UP(JBD_MAX_CHECKSUM_SIZE, - sizeof(*((struct shash_desc *)0)->__ctx))); - int err; - - BUG_ON(crypto_shash_descsize(journal->j_chksum_driver) > - JBD_MAX_CHECKSUM_SIZE); - - desc->tfm = journal->j_chksum_driver; - *(u32 *)desc->__ctx = crc; - - err = crypto_shash_update(desc, address, length); - BUG_ON(err); - - return *(u32 *)desc->__ctx; + return crc32c(crc, address, length); } /* Return most recent uncommitted transaction */ diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index f5a2727ca4a9..fdb79dd1ebd8 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -75,6 +75,7 @@ #include <linux/types.h> #include <linux/compiler.h> +#include <linux/cleanup.h> extern bool static_key_initialized; @@ -347,6 +348,8 @@ static inline void static_key_disable(struct static_key *key) #endif /* CONFIG_JUMP_LABEL */ +DEFINE_LOCK_GUARD_0(jump_label_lock, jump_label_lock(), jump_label_unlock()) + #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE #define jump_label_enabled static_key_enabled diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index c3f075e8f60c..1c6a6c1704d8 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -57,10 +57,10 @@ static inline void *dereference_symbol_descriptor(void *ptr) preempt_disable(); mod = __module_address((unsigned long)ptr); - preempt_enable(); if (mod) ptr = dereference_module_function_descriptor(mod, ptr); + preempt_enable(); #endif return ptr; } diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 6bbfc8aa42e8..890011071f2b 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -153,7 +153,7 @@ static __always_inline void kasan_unpoison_new_object(struct kmem_cache 
*cache, void __kasan_poison_new_object(struct kmem_cache *cache, void *object); /** - * kasan_unpoison_new_object - Repoison a new slab object. + * kasan_poison_new_object - Repoison a new slab object. * @cache: Cache the object belongs to. * @object: Pointer to the object. * * @@ -491,7 +491,6 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, void kasan_cache_shrink(struct kmem_cache *cache); void kasan_cache_shutdown(struct kmem_cache *cache); void kasan_record_aux_stack(void *ptr); -void kasan_record_aux_stack_noalloc(void *ptr); #else /* CONFIG_KASAN_GENERIC */ @@ -509,7 +508,6 @@ static inline void kasan_cache_create(struct kmem_cache *cache, static inline void kasan_cache_shrink(struct kmem_cache *cache) {} static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} static inline void kasan_record_aux_stack(void *ptr) {} -static inline void kasan_record_aux_stack_noalloc(void *ptr) {} #endif /* CONFIG_KASAN_GENERIC */ diff --git a/include/linux/kcore.h b/include/linux/kcore.h index 86c0f1d18998..9a2fa013c91d 100644 --- a/include/linux/kcore.h +++ b/include/linux/kcore.h @@ -20,19 +20,6 @@ struct kcore_list { int type; }; -struct vmcore { - struct list_head list; - unsigned long long paddr; - unsigned long long size; - loff_t offset; -}; - -struct vmcoredd_node { - struct list_head list; /* List of dumps */ - void *buf; /* Buffer containing device's dump */ - unsigned int size; /* Size of the buffer */ -}; - #ifdef CONFIG_PROC_KCORE void __init kclist_add(struct kcore_list *, void *, size_t, int type); diff --git a/include/linux/kdb.h b/include/linux/kdb.h index f6c2ddb16b95..905a2e2f45f6 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -140,9 +140,6 @@ extern const char *kdb_diemsg; extern unsigned int kdb_flags; /* Global flags, see kdb_state for per cpu state */ -extern void kdb_save_flags(void); -extern void kdb_restore_flags(void); - #define KDB_FLAG(flag) (kdb_flags & KDB_FLAG_##flag) #define KDB_FLAG_SET(flag) ((void)(kdb_flags |= KDB_FLAG_##flag)) #define KDB_FLAG_CLEAR(flag) ((void)(kdb_flags &= ~KDB_FLAG_##flag)) diff --git a/include/linux/kref.h b/include/linux/kref.h index d32e21a2538c..88e82ab1367c 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -46,18 +46,18 @@ static inline void kref_get(struct kref *kref) } /** - * kref_put - decrement refcount for object. - * @kref: object. - * @release: pointer to the function that will clean up the object when the + * kref_put - Decrement refcount for object + * @kref: Object + * @release: Pointer to the function that will clean up the object when the * last reference to the object is released. - * This pointer is required, and it is not acceptable to pass kfree - * in as this function. * - * Decrement the refcount, and if 0, call release(). - * Return 1 if the object was removed, otherwise return 0. Beware, if this - * function returns 0, you still can not count on the kref from remaining in - * memory. Only use the return value if you want to see if the kref is now - * gone, not present. + * Decrement the refcount, and if 0, call @release. The caller may not + * pass NULL or kfree() as the release function. + * + * Return: 1 if this call removed the object, otherwise return 0. Beware, + * if this function returns 0, another caller may have removed the object + * by the time this function returns. Use the return value only to learn + * whether this particular call released the object, never to conclude + * that the object is still present.
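A hedged sketch of the contract spelled out above (the struct and function names are invented, not part of this header): cleanup always goes through a dedicated release function, never a bare kfree():

	struct example {
		struct kref kref;
		/* ... payload ... */
	};

	static void example_release(struct kref *kref)
	{
		struct example *e = container_of(kref, struct example, kref);

		kfree(e);
	}

	/* Returns 1 only if this particular call freed the object. */
	kref_put(&e->kref, example_release);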
*/ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)) { @@ -68,17 +68,37 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref) return 0; } +/** + * kref_put_mutex - Decrement refcount for object + * @kref: Object + * @release: Pointer to the function that will clean up the object when the + * last reference to the object is released. + * @mutex: Mutex which protects the release function. + * + * This variant of kref_put() calls the @release function with the @mutex + * held. The @release function will release the mutex. + */ static inline int kref_put_mutex(struct kref *kref, void (*release)(struct kref *kref), - struct mutex *lock) + struct mutex *mutex) { - if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) { + if (refcount_dec_and_mutex_lock(&kref->refcount, mutex)) { release(kref); return 1; } return 0; } +/** + * kref_put_lock - Decrement refcount for object + * @kref: Object + * @release: Pointer to the function that will clean up the object when the + * last reference to the object is released. + * @lock: Spinlock which protects the release function. + * + * This variant of kref_put() calls the @release function with the @lock + * held. The @release function will release the lock. + */ static inline int kref_put_lock(struct kref *kref, void (*release)(struct kref *kref), spinlock_t *lock) @@ -94,8 +114,6 @@ static inline int kref_put_lock(struct kref *kref, * kref_get_unless_zero - Increment refcount for object unless it is zero. * @kref: object. * - * Return non-zero if the increment succeeded. Otherwise return 0. - * * This function is intended to simplify locking around refcounting for * objects that can be looked up from a lookup structure, and which are * removed from that lookup structure in the object destructor. @@ -105,6 +123,8 @@ static inline int kref_put_lock(struct kref *kref, * With a lookup followed by a kref_get_unless_zero *with return value check* * locking in the kref_put path can be deferred to the actual removal from * the lookup structure and RCU lookups become trivial. + * + * Return: non-zero if the increment succeeded. Otherwise return 0.
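The lookup pattern described above, as a hedged sketch (the lookup table, key, and entry type are hypothetical):

	rcu_read_lock();
	e = example_lookup(&table, key);	/* no reference held yet */
	if (e && !kref_get_unless_zero(&e->kref))
		e = NULL;			/* already being destroyed */
	rcu_read_unlock();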
*/ static inline int __must_check kref_get_unless_zero(struct kref *kref) { diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 6a53ac4885bb..d73095b5cd96 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -93,6 +93,7 @@ void folio_migrate_ksm(struct folio *newfolio, struct folio *folio); void collect_procs_ksm(const struct folio *folio, const struct page *page, struct list_head *to_kill, int force_early); long ksm_process_profit(struct mm_struct *); +bool ksm_process_mergeable(struct mm_struct *mm); #else /* !CONFIG_KSM */ diff --git a/include/linux/kthread.h b/include/linux/kthread.h index b11f53c1ba2e..8d27403888ce 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -85,6 +85,7 @@ kthread_run_on_cpu(int (*threadfn)(void *data), void *data, void free_kthread_struct(struct task_struct *k); void kthread_bind(struct task_struct *k, unsigned int cpu); void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); +int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask); int kthread_stop(struct task_struct *k); int kthread_stop_put(struct task_struct *k); bool kthread_should_stop(void); @@ -186,13 +187,58 @@ extern void __kthread_init_worker(struct kthread_worker *worker, int kthread_worker_fn(void *worker_ptr); -__printf(2, 3) -struct kthread_worker * -kthread_create_worker(unsigned int flags, const char namefmt[], ...); +__printf(3, 4) +struct kthread_worker *kthread_create_worker_on_node(unsigned int flags, + int node, + const char namefmt[], ...); + +#define kthread_create_worker(flags, namefmt, ...) \ + kthread_create_worker_on_node(flags, NUMA_NO_NODE, namefmt, ## __VA_ARGS__) + +/** + * kthread_run_worker - create and wake a kthread worker. + * @flags: flags modifying the default behavior of the worker + * @namefmt: printf-style name for the thread. + * + * Description: Convenient wrapper for kthread_create_worker() followed by + * wake_up_process(). Returns the kthread_worker or ERR_PTR(-ENOMEM). + */ +#define kthread_run_worker(flags, namefmt, ...) \ +({ \ + struct kthread_worker *__kw \ + = kthread_create_worker(flags, namefmt, ## __VA_ARGS__); \ + if (!IS_ERR(__kw)) \ + wake_up_process(__kw->task); \ + __kw; \ +}) -__printf(3, 4) struct kthread_worker * +struct kthread_worker * kthread_create_worker_on_cpu(int cpu, unsigned int flags, - const char namefmt[], ...); + const char namefmt[]); + +/** + * kthread_run_worker_on_cpu - create and wake a cpu bound kthread worker. + * @cpu: CPU number + * @flags: flags modifying the default behavior of the worker + * @namefmt: printf-style name for the thread. Format is restricted + * to "name.*%u". Code fills in cpu number. + * + * Description: Convenient wrapper for kthread_create_worker_on_cpu() + * followed by wake_up_process(). Returns the kthread_worker or + * ERR_PTR(-ENOMEM).
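A hedged usage sketch for the CPU-bound wrapper defined just below (the work item, callback, and error handling are hypothetical):

	worker = kthread_run_worker_on_cpu(cpu, 0, "example_worker/%u");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&work, example_work_fn);
	kthread_queue_work(worker, &work);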
+ */ +static inline struct kthread_worker * +kthread_run_worker_on_cpu(int cpu, unsigned int flags, + const char namefmt[]) +{ + struct kthread_worker *kw; + + kw = kthread_create_worker_on_cpu(cpu, flags, namefmt); + if (!IS_ERR(kw)) + wake_up_process(kw->task); + + return kw; +} bool kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work); diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 3a4e723eae0f..383ed9985802 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -222,6 +222,11 @@ static inline ktime_t ns_to_ktime(u64 ns) return ns; } +static inline ktime_t us_to_ktime(u64 us) +{ + return us * NSEC_PER_USEC; +} + static inline ktime_t ms_to_ktime(u64 ms) { return ms * NSEC_PER_MSEC; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 401439bb21e3..3cb9a32a6330 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -255,11 +255,17 @@ union kvm_mmu_notifier_arg { unsigned long attributes; }; +enum kvm_gfn_range_filter { + KVM_FILTER_SHARED = BIT(0), + KVM_FILTER_PRIVATE = BIT(1), +}; + struct kvm_gfn_range { struct kvm_memory_slot *slot; gfn_t start; gfn_t end; union kvm_mmu_notifier_arg arg; + enum kvm_gfn_range_filter attr_filter; bool may_block; }; bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); @@ -596,7 +602,12 @@ struct kvm_memory_slot { #ifdef CONFIG_KVM_PRIVATE_MEM struct { - struct file __rcu *file; + /* + * Writes protected by kvm->slots_lock. Acquiring a + * reference via kvm_gmem_get_file() is protected by + * either kvm->slots_lock or kvm->srcu. + */ + struct file *file; pgoff_t pgoff; } gmem; #endif @@ -963,6 +974,15 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) { int num_vcpus = atomic_read(&kvm->online_vcpus); + + /* + * Explicitly verify the target vCPU is online, as the anti-speculation + * logic only limits the CPU's ability to speculate, e.g. given a "bad" + * index, clamping the index to 0 would return vCPU0, not NULL. + */ + if (i >= num_vcpus) + return NULL; + i = array_index_nospec(i, num_vcpus); /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. 
*/ @@ -970,9 +990,10 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) return xa_load(&kvm->vcpu_array, i); } -#define kvm_for_each_vcpu(idx, vcpup, kvm) \ - xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \ - (atomic_read(&kvm->online_vcpus) - 1)) +#define kvm_for_each_vcpu(idx, vcpup, kvm) \ + if (atomic_read(&kvm->online_vcpus)) \ + xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \ + (atomic_read(&kvm->online_vcpus) - 1)) static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) { @@ -1183,7 +1204,7 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn * -- just change its flags * * Since flags can be changed by some of these operations, the following - * differentiation is the best we can do for __kvm_set_memory_region(): + * differentiation is the best we can do for kvm_set_memory_region(): */ enum kvm_mr_change { KVM_MR_CREATE, @@ -1192,10 +1213,8 @@ enum kvm_mr_change { KVM_MR_FLAGS_ONLY, }; -int kvm_set_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region2 *mem); -int __kvm_set_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region2 *mem); +int kvm_set_internal_memslot(struct kvm *kvm, + const struct kvm_userspace_memory_region2 *mem); void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen); int kvm_arch_prepare_memory_region(struct kvm *kvm, diff --git a/include/linux/libata.h b/include/linux/libata.h index c1a85d46eba6..c1c57f814b98 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1199,10 +1199,9 @@ extern int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]); extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev); -extern int ata_scsi_slave_alloc(struct scsi_device *sdev); -int ata_scsi_device_configure(struct scsi_device *sdev, - struct queue_limits *lim); -extern void ata_scsi_slave_destroy(struct scsi_device *sdev); +extern int ata_scsi_sdev_init(struct scsi_device *sdev); +int ata_scsi_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim); +extern void ata_scsi_sdev_destroy(struct scsi_device *sdev); extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth); extern int ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, @@ -1301,8 +1300,8 @@ extern struct ata_port *ata_port_alloc(struct ata_host *host); extern void ata_port_free(struct ata_port *ap); extern int ata_tport_add(struct device *parent, struct ata_port *ap); extern void ata_tport_delete(struct ata_port *ap); -int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim, - struct ata_port *ap); +int ata_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim, + struct ata_port *ap); extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis); @@ -1458,8 +1457,8 @@ extern const struct attribute_group *ata_common_sdev_groups[]; .this_id = ATA_SHT_THIS_ID, \ .emulated = ATA_SHT_EMULATED, \ .proc_name = drv_name, \ - .slave_alloc = ata_scsi_slave_alloc, \ - .slave_destroy = ata_scsi_slave_destroy, \ + .sdev_init = ata_scsi_sdev_init, \ + .sdev_destroy = ata_scsi_sdev_destroy, \ .bios_param = ata_std_bios_param, \ .unlock_native_capacity = ata_scsi_unlock_native_capacity,\ .max_sectors = ATA_MAX_SECTORS_LBA48 @@ -1467,14 +1466,14 @@ extern const struct 
attribute_group *ata_common_sdev_groups[]; #define ATA_SUBBASE_SHT(drv_name) \ __ATA_BASE_SHT(drv_name), \ .can_queue = ATA_DEF_QUEUE, \ - .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ - .device_configure = ata_scsi_device_configure + .tag_alloc_policy_rr = true, \ + .sdev_configure = ata_scsi_sdev_configure #define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \ __ATA_BASE_SHT(drv_name), \ .can_queue = drv_qd, \ - .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ - .device_configure = ata_scsi_device_configure + .tag_alloc_policy_rr = true, \ + .sdev_configure = ata_scsi_sdev_configure #define ATA_BASE_SHT(drv_name) \ ATA_SUBBASE_SHT(drv_name), \ diff --git a/include/linux/libgcc.h b/include/linux/libgcc.h index fc388da6a027..0d68f9d6a6a7 100644 --- a/include/linux/libgcc.h +++ b/include/linux/libgcc.h @@ -34,4 +34,8 @@ long long notrace __lshrdi3(long long u, word_type b); long long notrace __muldi3(long long u, long long v); word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b); +#ifdef CONFIG_HAVE_ARCH_LIBGCC_H +#include <asm/libgcc.h> +#endif + #endif /* __ASM_LIBGCC_H */ diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 05c166811f6b..fe739d35a864 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -91,13 +91,24 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren * @memcg: the cgroup of the sublist to add the item to. * * If the element is already part of a list, this function returns doing - * nothing. Therefore the caller does not need to keep state about whether or - * not the element already belongs in the list and is allowed to lazy update - * it. Note however that this is valid for *a* list, not *this* list. If - * the caller organize itself in a way that elements can be in more than - * one type of list, it is up to the caller to fully remove the item from - * the previous list (with list_lru_del() for instance) before moving it - * to @lru. + * nothing. This means that it is not necessary to keep state about whether or + * not the element already belongs in the list. That said, this logic only + * works if the item is in *this* list. If the item might be in some other + * list, then you cannot rely on this check and you must remove it from the + * other list before trying to insert it. + * + * The lru list consists of many sublists internally; the @nid and @memcg + * parameters are used to determine which sublist to insert the item into. + * It's important to use the right value of @nid and @memcg when deleting the + * item, since it might otherwise get deleted from the wrong sublist. + * + * This also applies when attempting to insert the item multiple times - if + * the item is currently in one sublist and you call list_lru_add() again, you + * must pass the right @nid and @memcg parameters so that the same sublist is + * used. + * + * You must ensure that the memcg is not freed during this call (e.g., with + * rcu or by taking a css refcnt). * * Return: true if the list was updated, false otherwise */ @@ -113,7 +124,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid, * memcg of the sublist is determined by @item list_head. This assumption is * valid for slab objects LRU such as dentries, inodes, etc. 
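A hedged sketch of the object-based variant (the lru, object layout, and counter are hypothetical): because the node and memcg sublist are derived from the object embedding @item, the caller passes neither:

	if (list_lru_add_obj(&example_lru, &obj->lru))
		atomic_long_inc(&example_nr_cached);	/* newly added */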
* - * Return value: true if the list was updated, false otherwise + * Return: true if the list was updated, false otherwise */ bool list_lru_add_obj(struct list_lru *lru, struct list_head *item); @@ -125,8 +136,19 @@ bool list_lru_add_obj(struct list_lru *lru, struct list_head *item); * @memcg: the cgroup of the sublist to delete the item from. * * This function works analogously to list_lru_add() in terms of list - * manipulation. The comments about an element already pertaining to - * a list are also valid for list_lru_del(). + * manipulation. + * + * The comments in list_lru_add() about an element already being in a list are + * also valid for list_lru_del(), that is, you can delete an item that has + * already been removed or never been added. However, if the item is in a + * list, it must be in *this* list, and you must pass the right value of @nid + * and @memcg so that the right sublist is used. + * + * You must ensure that the memcg is not freed during this call (e.g., with + * rcu or by taking a css refcnt). When a memcg is deleted, list_lru entries + * are automatically moved to the parent memcg. This is done in a race-free + * way, so during deletion of a memcg both the old and new memcg will resolve + * to the same sublist internally. * * Return: true if the list was updated, false otherwise */ @@ -142,7 +164,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid, * memcg of the sublist is determined by @item list_head. This assumption is * valid for slab objects LRU such as dentries, inodes, etc. * - * Return value: true if the list was updated, false otherwise. + * Return: true if the list was updated, false otherwise. */ bool list_lru_del_obj(struct list_lru *lru, struct list_head *item); diff --git a/include/linux/lockref.h b/include/linux/lockref.h index c3a1f78bc884..c39f119659ba 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h @@ -34,14 +34,24 @@ struct lockref { }; }; -extern void lockref_get(struct lockref *); -extern int lockref_put_return(struct lockref *); -extern int lockref_get_not_zero(struct lockref *); -extern int lockref_put_not_zero(struct lockref *); -extern int lockref_put_or_lock(struct lockref *); - -extern void lockref_mark_dead(struct lockref *); -extern int lockref_get_not_dead(struct lockref *); +/** + * lockref_init - Initialize a lockref + * @lockref: pointer to lockref structure + * @count: initial count + */ +static inline void lockref_init(struct lockref *lockref, unsigned int count) +{ + spin_lock_init(&lockref->lock); + lockref->count = count; +} + +void lockref_get(struct lockref *lockref); +int lockref_put_return(struct lockref *lockref); +bool lockref_get_not_zero(struct lockref *lockref); +bool lockref_put_or_lock(struct lockref *lockref); + +void lockref_mark_dead(struct lockref *lockref); +bool lockref_get_not_dead(struct lockref *lockref); /* Must be called under spinlock for reliable results */ static inline bool __lockref_is_dead(const struct lockref *l) diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 97a8b21eb033..e13d2f947b51 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -77,6 +77,7 @@ struct common_audit_data { #define LSM_AUDIT_DATA_LOCKDOWN 15 #define LSM_AUDIT_DATA_NOTIFICATION 16 #define LSM_AUDIT_DATA_ANONINODE 17 +#define LSM_AUDIT_DATA_NLMSGTYPE 18 union { struct path path; struct dentry *dentry; @@ -98,6 +99,7 @@ struct common_audit_data { struct lsm_ibendport_audit *ibendport; int reason; const char *anonclass; + u16 nlmsg_type; }
u; /* this union contains LSM specific data */ union { @@ -116,14 +118,28 @@ struct common_audit_data { #define v4info fam.v4 #define v6info fam.v6 +#ifdef CONFIG_AUDIT + int ipv4_skb_to_auditdata(struct sk_buff *skb, struct common_audit_data *ad, u8 *proto); +#if IS_ENABLED(CONFIG_IPV6) int ipv6_skb_to_auditdata(struct sk_buff *skb, struct common_audit_data *ad, u8 *proto); +#endif /* IS_ENABLED(CONFIG_IPV6) */ void common_lsm_audit(struct common_audit_data *a, void (*pre_audit)(struct audit_buffer *, void *), void (*post_audit)(struct audit_buffer *, void *)); +#else /* CONFIG_AUDIT */ + +static inline void common_lsm_audit(struct common_audit_data *a, + void (*pre_audit)(struct audit_buffer *, void *), + void (*post_audit)(struct audit_buffer *, void *)) +{ +} + +#endif /* CONFIG_AUDIT */ + #endif diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index eb2937599cb0..e2f1ce37c41e 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -83,7 +83,7 @@ LSM_HOOK(int, 0, move_mount, const struct path *from_path, const struct path *to_path) LSM_HOOK(int, -EOPNOTSUPP, dentry_init_security, struct dentry *dentry, int mode, const struct qstr *name, const char **xattr_name, - void **ctx, u32 *ctxlen) + struct lsm_context *cp) LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode, struct qstr *name, const struct cred *old, struct cred *new) @@ -295,17 +295,16 @@ LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name, char **value) LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size) LSM_HOOK(int, 0, ismaclabel, const char *name) -LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, char **secdata, - u32 *seclen) +LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, struct lsm_context *cp) LSM_HOOK(int, -EOPNOTSUPP, lsmprop_to_secctx, struct lsm_prop *prop, - char **secdata, u32 *seclen) + struct lsm_context *cp) LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid) -LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen) +LSM_HOOK(void, LSM_RET_VOID, release_secctx, struct lsm_context *cp) LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode) LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen) LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen) -LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, void **ctx, - u32 *ctxlen) +LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, + struct lsm_context *cp) #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) LSM_HOOK(int, 0, post_notification, const struct cred *w_cred, diff --git a/include/linux/lz4.h b/include/linux/lz4.h index b16e15b9587a..ad6042a718b5 100644 --- a/include/linux/lz4.h +++ b/include/linux/lz4.h @@ -645,4 +645,10 @@ int LZ4_decompress_safe_usingDict(const char *source, char *dest, int LZ4_decompress_fast_usingDict(const char *source, char *dest, int originalSize, const char *dictStart, int dictSize); +#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32) + +#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */ +#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */ +#endif + #endif diff --git a/include/linux/mailbox/exynos-message.h b/include/linux/mailbox/exynos-message.h new file mode 100644 index 000000000000..5a9ed5ce2046 --- /dev/null +++ b/include/linux/mailbox/exynos-message.h @@ 
-0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Exynos mailbox message. + * + * Copyright 2024 Linaro Ltd. + */ + +#ifndef _LINUX_EXYNOS_MESSAGE_H_ +#define _LINUX_EXYNOS_MESSAGE_H_ + +#define EXYNOS_MBOX_CHAN_TYPE_DOORBELL 0 +#define EXYNOS_MBOX_CHAN_TYPE_DATA 1 + +struct exynos_mbox_msg { + unsigned int chan_id; + unsigned int chan_type; +}; + +#endif /* _LINUX_EXYNOS_MESSAGE_H_ */ diff --git a/include/linux/mailbox/mchp-ipc.h b/include/linux/mailbox/mchp-ipc.h new file mode 100644 index 000000000000..f084ac9e291b --- /dev/null +++ b/include/linux/mailbox/mchp-ipc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Microchip Technology Inc. All rights reserved. + */ + +#ifndef _LINUX_MCHP_IPC_H_ +#define _LINUX_MCHP_IPC_H_ + +#include <linux/mailbox_controller.h> +#include <linux/types.h> + +struct mchp_ipc_msg { + u32 *buf; + u16 size; +}; + +struct mchp_ipc_sbi_chan { + void *buf_base_tx; + void *buf_base_rx; + void *msg_buf_tx; + void *msg_buf_rx; + phys_addr_t buf_base_tx_addr; + phys_addr_t buf_base_rx_addr; + phys_addr_t msg_buf_tx_addr; + phys_addr_t msg_buf_rx_addr; + int chan_aggregated_irq; + int mp_irq; + int mc_irq; + u32 id; + u32 max_msg_size; +}; + +#endif /* _LINUX_MCHP_IPC_H_ */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 673d5cae7c81..e79eb6ac516f 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -378,6 +378,10 @@ static inline int memblock_get_region_node(const struct memblock_region *r) /* Flags for memblock allocation APIs */ #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) #define MEMBLOCK_ALLOC_ACCESSIBLE 0 +/* + * MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies + * MEMBLOCK_ALLOC_ACCESSIBLE + */ #define MEMBLOCK_ALLOC_NOLEAKTRACE 1 /* We are using top down, so it is safe to use 0 here */ @@ -417,6 +421,12 @@ static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align) MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); } +void *__memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align, + const char *func); + +#define memblock_alloc_or_panic(size, align) \ + __memblock_alloc_or_panic(size, align, __func__) + static inline void *memblock_alloc_raw(phys_addr_t size, phys_addr_t align) { diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 5502aa8e138e..6e74b8254d9b 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -620,8 +620,6 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target, page_counter_read(&memcg->memory); } -void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg); - int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp); /** @@ -646,8 +644,7 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, return __mem_cgroup_charge(folio, mm, gfp); } -int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, - long nr_pages); +int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp); int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry); @@ -677,7 +674,6 @@ static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios) __mem_cgroup_uncharge_folios(folios); } -void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages); void mem_cgroup_replace_folio(struct folio *old, struct folio *new); void mem_cgroup_migrate(struct folio *old, struct folio *new); @@ -1046,6 +1042,23 @@ static inline void
memcg_memory_event_mm(struct mm_struct *mm, void split_page_memcg(struct page *head, int old_order, int new_order); +static inline u64 cgroup_id_from_mm(struct mm_struct *mm) +{ + struct mem_cgroup *memcg; + u64 id; + + if (mem_cgroup_disabled()) + return 0; + + rcu_read_lock(); + memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); + if (!memcg) + memcg = root_mem_cgroup; + id = cgroup_id(memcg->css.cgroup); + rcu_read_unlock(); + return id; +} + #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 @@ -1135,21 +1148,15 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target, return false; } -static inline void mem_cgroup_commit_charge(struct folio *folio, - struct mem_cgroup *memcg) -{ -} - static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) { return 0; } -static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, - gfp_t gfp, long nr_pages) +static inline int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp) { - return 0; + return 0; } static inline int mem_cgroup_swapin_charge_folio(struct folio *folio, @@ -1170,11 +1177,6 @@ static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios) { } -static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, - unsigned int nr_pages) -{ -} - static inline void mem_cgroup_replace_folio(struct folio *old, struct folio *new) { @@ -1466,6 +1468,11 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) static inline void split_page_memcg(struct page *head, int old_order, int new_order) { } + +static inline u64 cgroup_id_from_mm(struct mm_struct *mm) +{ + return 0; +} #endif /* CONFIG_MEMCG */ /* diff --git a/include/linux/memfd.h b/include/linux/memfd.h index d437e3070850..246daadbfde8 100644 --- a/include/linux/memfd.h +++ b/include/linux/memfd.h @@ -7,7 +7,14 @@ #ifdef CONFIG_MEMFD_CREATE extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg); struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx); -unsigned int *memfd_file_seals_ptr(struct file *file); +/* + * Check for any existing seals on mmap, return an error if access is denied due + * to sealing, or 0 otherwise. + * + * We also update VMA flags if appropriate by manipulating the VMA flags pointed + * to by vm_flags_ptr. + */ +int memfd_check_seals_mmap(struct file *file, unsigned long *vm_flags_ptr); #else static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a) { @@ -17,19 +24,11 @@ static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx) { return ERR_PTR(-EINVAL); } - -static inline unsigned int *memfd_file_seals_ptr(struct file *file) +static inline int memfd_check_seals_mmap(struct file *file, + unsigned long *vm_flags_ptr) { - return NULL; + return 0; } #endif -/* Retrieve memfd seals associated with the file, if any. */ -static inline unsigned int memfd_file_seals(struct file *file) -{ - unsigned int *sealsp = memfd_file_seals_ptr(file); - - return sealsp ?
*sealsp : 0; -} - #endif /* __LINUX_MEMFD_H */ diff --git a/include/linux/memory/ti-aemif.h b/include/linux/memory/ti-aemif.h new file mode 100644 index 000000000000..da94a9d985e7 --- /dev/null +++ b/include/linux/memory/ti-aemif.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __MEMORY_TI_AEMIF_H +#define __MEMORY_TI_AEMIF_H + +/** + * struct aemif_cs_timings - structure to hold CS timing configuration. + * Each value is expressed as a number of clock cycles minus one. + * @ta: minimum turn around time + * @rhold: read hold width + * @rstrobe: read strobe width + * @rsetup: read setup width + * @whold: write hold width + * @wstrobe: write strobe width + * @wsetup: write setup width + */ +struct aemif_cs_timings { + u32 ta; + u32 rhold; + u32 rstrobe; + u32 rsetup; + u32 whold; + u32 wstrobe; + u32 wsetup; +}; + +struct aemif_device; + +int aemif_set_cs_timings(struct aemif_device *aemif, u8 cs, struct aemif_cs_timings *timings); +int aemif_check_cs_timings(struct aemif_cs_timings *timings); + +#endif // __MEMORY_TI_AEMIF_H diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index b27ddce5d324..eaac5ae8c05c 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -144,8 +144,6 @@ extern u64 max_mem_size; extern int mhp_online_type_from_str(const char *str); -/* Default online_type (MMOP_*) when new memory blocks are added. */ -extern int mhp_default_online_type; /* If movable_node boot option specified */ extern bool movable_node_enabled; static inline bool movable_node_is_enabled(void) @@ -303,6 +301,9 @@ static inline void __remove_memory(u64 start, u64 size) {} #endif /* CONFIG_MEMORY_HOTREMOVE */ #ifdef CONFIG_MEMORY_HOTPLUG +/* Default online_type (MMOP_*) when new memory blocks are added. */ +extern int mhp_get_default_online_type(void); +extern void mhp_set_default_online_type(int online_type); extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat); extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index e8bcad641d8c..faeea7abd688 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -72,7 +72,7 @@ struct mfd_cell { int (*resume)(struct platform_device *dev); /* platform data passed to the sub devices drivers */ - void *platform_data; + const void *platform_data; size_t pdata_size; /* Matches ACPI */ diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h index 76feb3a7066d..9cb2fc2938ce 100644 --- a/include/linux/mfd/da9052/da9052.h +++ b/include/linux/mfd/da9052/da9052.h @@ -93,6 +93,8 @@ struct da9052 { int chip_irq; + int fault_log; + /* SOC I/O transfer related fixes for DA9052/53 */ int (*fix_io) (struct da9052 *da9052, unsigned char reg); }; diff --git a/include/linux/mfd/qnap-mcu.h b/include/linux/mfd/qnap-mcu.h new file mode 100644 index 000000000000..8d48c212fd44 --- /dev/null +++ b/include/linux/mfd/qnap-mcu.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Core definitions for QNAP MCU MFD driver.
+ * Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de> + */ + +#ifndef _LINUX_QNAP_MCU_H_ +#define _LINUX_QNAP_MCU_H_ + +struct qnap_mcu; + +struct qnap_mcu_variant { + u32 baud_rate; + int num_drives; + int fan_pwm_min; + int fan_pwm_max; + bool usb_led; +}; + +int qnap_mcu_exec(struct qnap_mcu *mcu, + const u8 *cmd_data, size_t cmd_data_size, + u8 *reply_data, size_t reply_data_size); +int qnap_mcu_exec_with_ack(struct qnap_mcu *mcu, + const u8 *cmd_data, size_t cmd_data_size); + +#endif /* _LINUX_QNAP_MCU_H_ */ diff --git a/include/linux/mfd/tps65219.h b/include/linux/mfd/tps65219.h index e6826e34e2a6..546bceec7173 100644 --- a/include/linux/mfd/tps65219.h +++ b/include/linux/mfd/tps65219.h @@ -10,14 +10,9 @@ #include <linux/bitops.h> #include <linux/notifier.h> +#include <linux/regmap.h> #include <linux/regulator/driver.h> -struct regmap; -struct regmap_irq_chip_data; - -#define TPS65219_1V35 1350000 -#define TPS65219_1V8 1800000 - /* TPS chip id list */ #define TPS65219 0xF0 diff --git a/include/linux/mfd/upboard-fpga.h b/include/linux/mfd/upboard-fpga.h new file mode 100644 index 000000000000..12231e40f5da --- /dev/null +++ b/include/linux/mfd/upboard-fpga.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * UP Board CPLD/FPGA driver + * + * Copyright (c) AAEON. All rights reserved. + * Copyright (C) 2024 Bootlin + * + * Author: Gary Wang <garywang@aaeon.com.tw> + * Author: Thomas Richard <thomas.richard@bootlin.com> + * + */ + +#ifndef __LINUX_MFD_UPBOARD_FPGA_H +#define __LINUX_MFD_UPBOARD_FPGA_H + +#define UPBOARD_REGISTER_SIZE 16 + +enum upboard_fpgareg { + UPBOARD_REG_PLATFORM_ID = 0x10, + UPBOARD_REG_FIRMWARE_ID = 0x11, + UPBOARD_REG_FUNC_EN0 = 0x20, + UPBOARD_REG_FUNC_EN1 = 0x21, + UPBOARD_REG_GPIO_EN0 = 0x30, + UPBOARD_REG_GPIO_EN1 = 0x31, + UPBOARD_REG_GPIO_EN2 = 0x32, + UPBOARD_REG_GPIO_DIR0 = 0x40, + UPBOARD_REG_GPIO_DIR1 = 0x41, + UPBOARD_REG_GPIO_DIR2 = 0x42, + UPBOARD_REG_MAX, +}; + +enum upboard_fpga_type { + UPBOARD_UP_FPGA, + UPBOARD_UP2_FPGA, +}; + +struct upboard_fpga_data { + enum upboard_fpga_type type; + const struct regmap_config *regmap_config; +}; + +struct upboard_fpga { + struct device *dev; + struct regmap *regmap; + struct gpio_desc *enable_gpio; + struct gpio_desc *reset_gpio; + struct gpio_desc *clear_gpio; + struct gpio_desc *strobe_gpio; + struct gpio_desc *datain_gpio; + struct gpio_desc *dataout_gpio; + unsigned int firmware_version; + const struct upboard_fpga_data *fpga_data; +}; + +#endif /* __LINUX_MFD_UPBOARD_FPGA_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 002e49b2ebd9..29919faea2f1 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -144,16 +144,14 @@ const struct movable_operations *page_movable_ops(struct page *page) #ifdef CONFIG_NUMA_BALANCING int migrate_misplaced_folio_prepare(struct folio *folio, struct vm_area_struct *vma, int node); -int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, - int node); +int migrate_misplaced_folio(struct folio *folio, int node); #else static inline int migrate_misplaced_folio_prepare(struct folio *folio, struct vm_area_struct *vma, int node) { return -EAGAIN; /* can't migrate now */ } -static inline int migrate_misplaced_folio(struct folio *folio, - struct vm_area_struct *vma, int node) +static inline int migrate_misplaced_folio(struct folio *folio, int node) { return -EAGAIN; /* can't migrate now */ } diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h index e781727c8916..1160bed6579e 100644 --- 
a/include/linux/min_heap.h +++ b/include/linux/min_heap.h @@ -6,6 +6,17 @@ #include <linux/string.h> #include <linux/types.h> +/* + * The Min Heap API provides utilities for managing min-heaps, a binary tree + * structure where each node's value is less than or equal to its children's + * values, ensuring the smallest element is at the root. + * + * Users should avoid directly calling functions prefixed with __min_heap_*(). + * Instead, use the provided macro wrappers. + * + * For further details and examples, refer to Documentation/core-api/min_heap.rst. + */ + /** * Data structure to hold a min-heap. * @nr: Number of elements currently in the heap. @@ -15,8 +26,8 @@ */ #define MIN_HEAP_PREALLOCATED(_type, _name, _nr) \ struct _name { \ - int nr; \ - int size; \ + size_t nr; \ + size_t size; \ _type *data; \ _type preallocated[_nr]; \ } @@ -218,7 +229,7 @@ void __min_heap_init_inline(min_heap_char *heap, void *data, int size) } #define min_heap_init_inline(_heap, _data, _size) \ - __min_heap_init_inline((min_heap_char *)_heap, _data, _size) + __min_heap_init_inline(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size) /* Get the minimum element from the heap. */ static __always_inline @@ -228,7 +239,8 @@ void *__min_heap_peek_inline(struct min_heap_char *heap) } #define min_heap_peek_inline(_heap) \ - (__minheap_cast(_heap) __min_heap_peek_inline((min_heap_char *)_heap)) + (__minheap_cast(_heap) \ + __min_heap_peek_inline(container_of(&(_heap)->nr, min_heap_char, nr))) /* Check if the heap is full. */ static __always_inline @@ -238,7 +250,7 @@ bool __min_heap_full_inline(min_heap_char *heap) } #define min_heap_full_inline(_heap) \ - __min_heap_full_inline((min_heap_char *)_heap) + __min_heap_full_inline(container_of(&(_heap)->nr, min_heap_char, nr)) /* Sift the element at pos down the heap. */ static __always_inline @@ -277,8 +289,8 @@ void __min_heap_sift_down_inline(min_heap_char *heap, int pos, size_t elem_size, } #define min_heap_sift_down_inline(_heap, _pos, _func, _args) \ - __min_heap_sift_down_inline((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), \ - _func, _args) + __min_heap_sift_down_inline(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \ + __minheap_obj_size(_heap), _func, _args) /* Sift up ith element from the heap, O(log2(nr)). */ static __always_inline @@ -304,8 +316,8 @@ void __min_heap_sift_up_inline(min_heap_char *heap, size_t elem_size, size_t idx } #define min_heap_sift_up_inline(_heap, _idx, _func, _args) \ - __min_heap_sift_up_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, \ - _func, _args) + __min_heap_sift_up_inline(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _idx, _func, _args) /* Floyd's approach to heapification that is O(nr). */ static __always_inline @@ -319,7 +331,8 @@ void __min_heapify_all_inline(min_heap_char *heap, size_t elem_size, } #define min_heapify_all_inline(_heap, _func, _args) \ - __min_heapify_all_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) + __min_heapify_all_inline(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _func, _args) /* Remove minimum element from the heap, O(log2(nr)). 
*/ static __always_inline @@ -340,7 +353,8 @@ bool __min_heap_pop_inline(min_heap_char *heap, size_t elem_size, } #define min_heap_pop_inline(_heap, _func, _args) \ - __min_heap_pop_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) + __min_heap_pop_inline(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _func, _args) /* * Remove the minimum element and then push the given element. The @@ -356,8 +370,8 @@ void __min_heap_pop_push_inline(min_heap_char *heap, const void *element, size_t } #define min_heap_pop_push_inline(_heap, _element, _func, _args) \ - __min_heap_pop_push_inline((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ - _func, _args) + __min_heap_pop_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \ + __minheap_obj_size(_heap), _func, _args) /* Push an element on to the heap, O(log2(nr)). */ static __always_inline @@ -382,8 +396,8 @@ bool __min_heap_push_inline(min_heap_char *heap, const void *element, size_t ele } #define min_heap_push_inline(_heap, _element, _func, _args) \ - __min_heap_push_inline((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ - _func, _args) + __min_heap_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \ + __minheap_obj_size(_heap), _func, _args) /* Remove ith element from the heap, O(log2(nr)). */ static __always_inline @@ -411,8 +425,8 @@ bool __min_heap_del_inline(min_heap_char *heap, size_t elem_size, size_t idx, } #define min_heap_del_inline(_heap, _idx, _func, _args) \ - __min_heap_del_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, \ - _func, _args) + __min_heap_del_inline(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _idx, _func, _args) void __min_heap_init(min_heap_char *heap, void *data, int size); void *__min_heap_peek(struct min_heap_char *heap); @@ -433,25 +447,31 @@ bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx, const struct min_heap_callbacks *func, void *args); #define min_heap_init(_heap, _data, _size) \ - __min_heap_init((min_heap_char *)_heap, _data, _size) + __min_heap_init(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size) #define min_heap_peek(_heap) \ - (__minheap_cast(_heap) __min_heap_peek((min_heap_char *)_heap)) + (__minheap_cast(_heap) __min_heap_peek(container_of(&(_heap)->nr, min_heap_char, nr))) #define min_heap_full(_heap) \ - __min_heap_full((min_heap_char *)_heap) + __min_heap_full(container_of(&(_heap)->nr, min_heap_char, nr)) #define min_heap_sift_down(_heap, _pos, _func, _args) \ - __min_heap_sift_down((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), _func, _args) + __min_heap_sift_down(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \ + __minheap_obj_size(_heap), _func, _args) #define min_heap_sift_up(_heap, _idx, _func, _args) \ - __min_heap_sift_up((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args) + __min_heap_sift_up(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _idx, _func, _args) #define min_heapify_all(_heap, _func, _args) \ - __min_heapify_all((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) + __min_heapify_all(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _func, _args) #define min_heap_pop(_heap, _func, _args) \ - __min_heap_pop((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) + __min_heap_pop(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _func, _args) 
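A hedged sketch of the wrapper API (every "example_*" name is invented): a small preallocated min-heap of ints, driven through the type-checked macros rather than the __min_heap_*() internals:

	MIN_HEAP_PREALLOCATED(int, example_min_heap, 8);

	static bool example_less(const void *lhs, const void *rhs, void *args)
	{
		return *(const int *)lhs < *(const int *)rhs;
	}

	static const struct min_heap_callbacks example_cb = {
		.less = example_less,
		.swp = NULL,	/* NULL selects the built-in byte-wise swap */
	};

	struct example_min_heap heap;
	int v = 42;

	min_heap_init(&heap, NULL, 8);	/* NULL data => use ->preallocated */
	min_heap_push(&heap, &v, &example_cb, NULL);
	/* min_heap_peek(&heap) now returns a typed pointer to 42 */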
#define min_heap_pop_push(_heap, _element, _func, _args) \ - __min_heap_pop_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ - _func, _args) + __min_heap_pop_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \ + __minheap_obj_size(_heap), _func, _args) #define min_heap_push(_heap, _element, _func, _args) \ - __min_heap_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args) + __min_heap_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \ + __minheap_obj_size(_heap), _func, _args) #define min_heap_del(_heap, _idx, _func, _args) \ - __min_heap_del((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args) + __min_heap_del(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _idx, _func, _args) #endif /* _LINUX_MIN_HEAP_H */ diff --git a/include/linux/minmax.h b/include/linux/minmax.h index 98008dd92153..eaaf5c008e4d 100644 --- a/include/linux/minmax.h +++ b/include/linux/minmax.h @@ -8,13 +8,10 @@ #include <linux/types.h> /* - * min()/max()/clamp() macros must accomplish three things: + * min()/max()/clamp() macros must accomplish several things: * * - Avoid multiple evaluations of the arguments (so side-effects like * "x++" happen only once) when non-constant. - * - Retain result as a constant expressions when called with only - * constant expressions (to avoid tripping VLA warnings in stack - * allocation usage). * - Perform signed v unsigned type-checking (to generate compile * errors instead of nasty runtime surprises). * - Unsigned char/short are always promoted to signed int and can be @@ -31,58 +28,54 @@ * bit #0 set if ok for unsigned comparisons * bit #1 set if ok for signed comparisons * - * In particular, statically non-negative signed integer - * expressions are ok for both. + * In particular, statically non-negative signed integer expressions + * are ok for both. * - * NOTE! Unsigned types smaller than 'int' are implicitly - * converted to 'int' in expressions, and are accepted for - * signed conversions for now. This is debatable. + * NOTE! Unsigned types smaller than 'int' are implicitly converted to 'int' + * in expressions, and are accepted for signed conversions for now. + * This is debatable. * - * Note that 'x' is the original expression, and 'ux' is - * the unique variable that contains the value. + * Note that 'x' is the original expression, and 'ux' is the unique variable + * that contains the value. * - * We use 'ux' for pure type checking, and 'x' for when - * we need to look at the value (but without evaluating - * it for side effects! Careful to only ever evaluate it - * with sizeof() or __builtin_constant_p() etc). + * We use 'ux' for pure type checking, and 'x' for when we need to look at the + * value (but without evaluating it for side effects! + * Careful to only ever evaluate it with sizeof() or __builtin_constant_p() etc). * - * Pointers end up being checked by the normal C type - * rules at the actual comparison, and these expressions - * only need to be careful to not cause warnings for - * pointer use. + * Pointers end up being checked by the normal C type rules at the actual + * comparison, and these expressions only need to be careful to not cause + * warnings for pointer use. */ -#define __signed_type_use(x,ux) (2+__is_nonneg(x,ux)) -#define __unsigned_type_use(x,ux) (1+2*(sizeof(ux)<4)) -#define __sign_use(x,ux) (is_signed_type(typeof(ux))? \ - __signed_type_use(x,ux):__unsigned_type_use(x,ux)) +#define __sign_use(ux) (is_signed_type(typeof(ux)) ? 
\ + (2 + __is_nonneg(ux)) : (1 + 2 * (sizeof(ux) < 4))) /* - * To avoid warnings about casting pointers to integers - * of different sizes, we need that special sign type. + * Check whether a signed value is always non-negative. * - * On 64-bit we can just always use 'long', since any - * integer or pointer type can just be cast to that. + * A cast is needed to avoid any warnings from values that aren't signed + * integer types (in which case the result doesn't matter). * - * This does not work for 128-bit signed integers since - * the cast would truncate them, but we do not use s128 - * types in the kernel (we do use 'u128', but they will - * be handled by the !is_signed_type() case). + * On 64-bit any integer or pointer type can safely be cast to 'long long'. + * But on 32-bit we need to avoid warnings about casting pointers to integers + * of different sizes without truncating 64-bit values so 'long' or 'long long' + * must be used depending on the size of the value. * - * NOTE! The cast is there only to avoid any warnings - * from when values that aren't signed integer types. + * This does not work for 128-bit signed integers since the cast would truncate + * them, but we do not use s128 types in the kernel (we do use 'u128', + * but they are handled by the !is_signed_type() case). */ -#ifdef CONFIG_64BIT - #define __signed_type(ux) long +#if __SIZEOF_POINTER__ == __SIZEOF_LONG_LONG__ +#define __is_nonneg(ux) statically_true((long long)(ux) >= 0) #else - #define __signed_type(ux) typeof(__builtin_choose_expr(sizeof(ux)>4,1LL,1L)) +#define __is_nonneg(ux) statically_true( \ + (typeof(__builtin_choose_expr(sizeof(ux) > 4, 1LL, 1L)))(ux) >= 0) #endif -#define __is_nonneg(x,ux) statically_true((__signed_type(ux))(x)>=0) -#define __types_ok(x,y,ux,uy) \ - (__sign_use(x,ux) & __sign_use(y,uy)) +#define __types_ok(ux, uy) \ + (__sign_use(ux) & __sign_use(uy)) -#define __types_ok3(x,y,z,ux,uy,uz) \ - (__sign_use(x,ux) & __sign_use(y,uy) & __sign_use(z,uz)) +#define __types_ok3(ux, uy, uz) \ + (__sign_use(ux) & __sign_use(uy) & __sign_use(uz)) #define __cmp_op_min < #define __cmp_op_max > @@ -97,30 +90,13 @@ #define __careful_cmp_once(op, x, y, ux, uy) ({ \ __auto_type ux = (x); __auto_type uy = (y); \ - BUILD_BUG_ON_MSG(!__types_ok(x,y,ux,uy), \ + BUILD_BUG_ON_MSG(!__types_ok(ux, uy), \ #op"("#x", "#y") signedness error"); \ __cmp(op, ux, uy); }) #define __careful_cmp(op, x, y) \ __careful_cmp_once(op, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_)) -#define __clamp(val, lo, hi) \ - ((val) >= (hi) ? (hi) : ((val) <= (lo) ? 
(lo) : (val))) - -#define __clamp_once(val, lo, hi, uval, ulo, uhi) ({ \ - __auto_type uval = (val); \ - __auto_type ulo = (lo); \ - __auto_type uhi = (hi); \ - static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \ - (lo) <= (hi), true), \ - "clamp() low limit " #lo " greater than high limit " #hi); \ - BUILD_BUG_ON_MSG(!__types_ok3(val,lo,hi,uval,ulo,uhi), \ - "clamp("#val", "#lo", "#hi") signedness error"); \ - __clamp(uval, ulo, uhi); }) - -#define __careful_clamp(val, lo, hi) \ - __clamp_once(val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_)) - /** * min - return minimum of two values of the same or compatible types * @x: first value @@ -154,7 +130,7 @@ #define __careful_op3(op, x, y, z, ux, uy, uz) ({ \ __auto_type ux = (x); __auto_type uy = (y);__auto_type uz = (z);\ - BUILD_BUG_ON_MSG(!__types_ok3(x,y,z,ux,uy,uz), \ + BUILD_BUG_ON_MSG(!__types_ok3(ux, uy, uz), \ #op"3("#x", "#y", "#z") signedness error"); \ __cmp(op, ux, __cmp(op, uy, uz)); }) @@ -177,6 +153,22 @@ __careful_op3(max, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_)) /** + * min_t - return minimum of two values, using the specified type + * @type: data type to use + * @x: first value + * @y: second value + */ +#define min_t(type, x, y) __cmp_once(min, type, x, y) + +/** + * max_t - return maximum of two values, using the specified type + * @type: data type to use + * @x: first value + * @y: second value + */ +#define max_t(type, x, y) __cmp_once(max, type, x, y) + +/** * min_not_zero - return the minimum that is _not_ zero, unless both are zero * @x: value1 * @y: value2 @@ -186,39 +178,57 @@ typeof(y) __y = (y); \ __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) +#define __clamp(val, lo, hi) \ + ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val))) + +#define __clamp_once(type, val, lo, hi, uval, ulo, uhi) ({ \ + type uval = (val); \ + type ulo = (lo); \ + type uhi = (hi); \ + BUILD_BUG_ON_MSG(statically_true(ulo > uhi), \ + "clamp() low limit " #lo " greater than high limit " #hi); \ + BUILD_BUG_ON_MSG(!__types_ok3(uval, ulo, uhi), \ + "clamp("#val", "#lo", "#hi") signedness error"); \ + __clamp(uval, ulo, uhi); }) + +#define __careful_clamp(type, val, lo, hi) \ + __clamp_once(type, val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_)) + /** - * clamp - return a value clamped to a given range with strict typechecking + * clamp - return a value clamped to a given range with typechecking * @val: current value * @lo: lowest allowable value * @hi: highest allowable value * - * This macro does strict typechecking of @lo/@hi to make sure they are of the - * same type as @val. See the unnecessary pointer comparisons. - */ -#define clamp(val, lo, hi) __careful_clamp(val, lo, hi) - -/* - * ..and if you can't take the strict - * types, you can specify one yourself. - * - * Or not use min/max/clamp at all, of course. + * This macro checks @val/@lo/@hi to make sure they have compatible + * signedness. */ +#define clamp(val, lo, hi) __careful_clamp(__auto_type, val, lo, hi) /** - * min_t - return minimum of two values, using the specified type - * @type: data type to use - * @x: first value - * @y: second value + * clamp_t - return a value clamped to a given range using a given type + * @type: the type of variable to use + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of type + * @type to make all the comparisons. 
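 *
 * Editorial example, not part of the patch: clamp_t(int, -5, 0, 10)
 * evaluates to 0, while clamp_t(u8, 300, 0, 255) first truncates 300 to
 * a u8 (44) before comparing, so any narrowing cast is the caller's
 * responsibility.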
*/ -#define min_t(type, x, y) __cmp_once(min, type, x, y) +#define clamp_t(type, val, lo, hi) __careful_clamp(type, val, lo, hi) /** - * max_t - return maximum of two values, using the specified type - * @type: data type to use - * @x: first value - * @y: second value + * clamp_val - return a value clamped to a given range using val's type + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of whatever + * type the input argument @val is. This is useful when @val is an unsigned + * type and @lo and @hi are literals that will otherwise be assigned a signed + * integer type. */ -#define max_t(type, x, y) __cmp_once(max, type, x, y) +#define clamp_val(val, lo, hi) __careful_clamp(typeof(val), val, lo, hi) /* * Do not check the array parameter using __must_be_array(). @@ -263,31 +273,6 @@ */ #define max_array(array, len) __minmax_array(max, array, len) -/** - * clamp_t - return a value clamped to a given range using a given type - * @type: the type of variable to use - * @val: current value - * @lo: minimum allowable value - * @hi: maximum allowable value - * - * This macro does no typechecking and uses temporary variables of type - * @type to make all the comparisons. - */ -#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi)) - -/** - * clamp_val - return a value clamped to a given range using val's type - * @val: current value - * @lo: minimum allowable value - * @hi: maximum allowable value - * - * This macro does no typechecking and uses temporary variables of whatever - * type the input argument @val is. This is useful when @val is an unsigned - * type and @lo and @hi are literals that will otherwise be assigned a signed - * integer type. - */ -#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) - static inline bool in_range64(u64 val, u64 start, u64 len) { return (val - start) < len; @@ -326,9 +311,9 @@ static inline bool in_range32(u32 val, u32 start, u32 len) * Use these carefully: no type checking, and uses the arguments * multiple times. Use for obvious constants only. 
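 *
 * Editorial example, not part of the patch: MAX(16, 64) in a constant
 * expression is fine, but MAX(i++, j) would evaluate i++ twice.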
*/ -#define MIN(a,b) __cmp(min,a,b) -#define MAX(a,b) __cmp(max,a,b) -#define MIN_T(type,a,b) __cmp(min,(type)(a),(type)(b)) -#define MAX_T(type,a,b) __cmp(max,(type)(a),(type)(b)) +#define MIN(a, b) __cmp(min, a, b) +#define MAX(a, b) __cmp(max, a, b) +#define MIN_T(type, a, b) __cmp(min, (type)(a), (type)(b)) +#define MAX_T(type, a, b) __cmp(max, (type)(a), (type)(b)) #endif /* _LINUX_MINMAX_H */ diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index c0fea6ca5076..69e110c2b86a 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -76,7 +76,7 @@ struct device; struct attribute_group; -struct miscdevice { +struct miscdevice { int minor; const char *name; const struct file_operations *fops; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index cc647992f3d1..0c48b20f818a 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1245,6 +1245,7 @@ enum mlx5_cap_type { MLX5_CAP_DEV_EVENT = 0x14, MLX5_CAP_IPSEC, MLX5_CAP_CRYPTO = 0x1a, + MLX5_CAP_SHAMPO = 0x1d, MLX5_CAP_MACSEC = 0x1f, MLX5_CAP_GENERAL_2 = 0x20, MLX5_CAP_PORT_SELECTION = 0x25, @@ -1470,6 +1471,9 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_MACSEC(mdev, cap)\ MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap) +#define MLX5_CAP_SHAMPO(mdev, cap) \ + MLX5_GET(shampo_cap, mdev->caps.hca[MLX5_CAP_SHAMPO]->cur, cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index ea48eb879a0f..af86097641b0 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -160,9 +160,12 @@ enum { MLX5_REG_MIRC = 0x9162, MLX5_REG_MTPTM = 0x9180, MLX5_REG_MTCTR = 0x9181, + MLX5_REG_MRTCQ = 0x9182, MLX5_REG_SBCAM = 0xB01F, MLX5_REG_RESOURCE_DUMP = 0xC000, + MLX5_REG_NIC_CAP = 0xC00D, MLX5_REG_DTOR = 0xC00E, + MLX5_REG_VHCA_ICM_CTRL = 0xC010, }; enum mlx5_qpts_trust_state { @@ -691,7 +694,6 @@ struct mlx5_timer { struct timecounter tc; u32 nominal_c_mult; unsigned long overflow_period; - struct delayed_work overflow_work; }; struct mlx5_clock { diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 438db888bde0..2a69d9d71276 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -163,7 +163,7 @@ struct mlx5_flow_destination { u32 tir_num; u32 ft_num; struct mlx5_flow_table *ft; - u32 counter_id; + struct mlx5_fc *counter; struct { u16 num; u16 vhca_id; @@ -299,6 +299,8 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); +struct mlx5_fc *mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size); +void mlx5_fc_local_destroy(struct mlx5_fc *counter); u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter); void mlx5_fc_query_cached(struct mlx5_fc *counter, u64 *bytes, u64 *packets, u64 *lastuse); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 48d47181c7cd..4f3716e124c9 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1095,7 +1095,9 @@ struct mlx5_ifc_qos_cap_bits { u8 log_esw_max_sched_depth[0x4]; u8 reserved_at_10[0x10]; - u8 reserved_at_20[0xb]; + u8 reserved_at_20[0x9]; + u8 esw_cross_esw_sched[0x1]; + u8 reserved_at_2a[0x1]; u8 log_max_qos_nic_queue_group[0x5]; u8 reserved_at_30[0x10]; @@ -1103,7 +1105,8 @@ struct mlx5_ifc_qos_cap_bits { u8 packet_pacing_min_rate[0x20]; - u8 
reserved_at_80[0x10]; + u8 reserved_at_80[0xb]; + u8 log_esw_max_rate_limit[0x5]; u8 packet_pacing_rate_table_size[0x10]; u8 esw_element_type[0x10]; @@ -1590,12 +1593,14 @@ enum { MLX5_STEERING_FORMAT_CONNECTX_5 = 0, MLX5_STEERING_FORMAT_CONNECTX_6DX = 1, MLX5_STEERING_FORMAT_CONNECTX_7 = 2, + MLX5_STEERING_FORMAT_CONNECTX_8 = 3, }; struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_0[0x6]; u8 page_request_disable[0x1]; - u8 reserved_at_7[0x9]; + u8 abs_native_port_num[0x1]; + u8 reserved_at_8[0x8]; u8 shared_object_to_user_object_allowed[0x1]; u8 reserved_at_13[0xe]; u8 vhca_resource_manager[0x1]; @@ -1825,7 +1830,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 regexp_params[0x1]; u8 uar_sz[0x6]; u8 port_selection_cap[0x1]; - u8 reserved_at_251[0x1]; + u8 nic_cap_reg[0x1]; u8 umem_uid_0[0x1]; u8 reserved_at_253[0x5]; u8 log_pg_sz[0x8]; @@ -2324,7 +2329,9 @@ struct mlx5_ifc_wq_bits { u8 headers_mkey[0x20]; u8 shampo_enable[0x1]; - u8 reserved_at_1e1[0x4]; + u8 reserved_at_1e1[0x1]; + u8 shampo_mode[0x2]; + u8 reserved_at_1e4[0x1]; u8 log_reservation_size[0x3]; u8 reserved_at_1e8[0x5]; u8 log_max_num_of_packets_per_reservation[0x3]; @@ -3322,6 +3329,14 @@ struct mlx5_ifc_dropped_packet_logged_bits { u8 reserved_at_0[0xe0]; }; +struct mlx5_ifc_nic_cap_reg_bits { + u8 reserved_at_0[0x1a]; + u8 vhca_icm_ctrl[0x1]; + u8 reserved_at_1b[0x5]; + + u8 reserved_at_20[0x60]; +}; + struct mlx5_ifc_default_timeout_bits { u8 to_multiplier[0x3]; u8 reserved_at_3[0x9]; @@ -3358,6 +3373,18 @@ struct mlx5_ifc_dtor_reg_bits { u8 reserved_at_1c0[0x20]; }; +struct mlx5_ifc_vhca_icm_ctrl_reg_bits { + u8 vhca_id_valid[0x1]; + u8 reserved_at_1[0xf]; + u8 vhca_id[0x10]; + + u8 reserved_at_20[0xa0]; + + u8 cur_alloc_icm[0x20]; + + u8 reserved_at_e0[0x120]; +}; + enum { MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1, MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2, @@ -3696,6 +3723,22 @@ struct mlx5_ifc_crypto_cap_bits { u8 reserved_at_80[0x780]; }; +struct mlx5_ifc_shampo_cap_bits { + u8 reserved_at_0[0x3]; + u8 shampo_log_max_reservation_size[0x5]; + u8 reserved_at_8[0x3]; + u8 shampo_log_min_reservation_size[0x5]; + u8 shampo_min_mss_size[0x10]; + + u8 shampo_header_split[0x1]; + u8 shampo_header_split_data_merge[0x1]; + u8 reserved_at_22[0x1]; + u8 shampo_log_max_headers_entry_size[0x5]; + u8 reserved_at_28[0x18]; + + u8 reserved_at_40[0x7c0]; +}; + union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap; struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2; @@ -4105,6 +4148,7 @@ enum { SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2, SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3, SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP = 0x4, + SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT = 0x5, }; enum { @@ -4113,34 +4157,41 @@ enum { ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2, ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3, ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4, + ELEMENT_TYPE_CAP_MASK_RATE_LIMIT = 1 << 5, }; enum { TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0, TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1, TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2, + TSAR_ELEMENT_TSAR_TYPE_TC_ARB = 0x3, }; enum { TSAR_TYPE_CAP_MASK_DWRR = 1 << 0, TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1, TSAR_TYPE_CAP_MASK_ETS = 1 << 2, + TSAR_TYPE_CAP_MASK_TC_ARB = 1 << 3, }; struct mlx5_ifc_tsar_element_bits { - u8 reserved_at_0[0x8]; + u8 traffic_class[0x4]; + u8 reserved_at_4[0x4]; u8 tsar_type[0x8]; u8 reserved_at_10[0x10]; }; struct mlx5_ifc_vport_element_bits { - u8 reserved_at_0[0x10]; + u8 reserved_at_0[0x4]; + u8 eswitch_owner_vhca_id_valid[0x1]; + u8 
eswitch_owner_vhca_id[0xb]; u8 vport_number[0x10]; }; struct mlx5_ifc_vport_tc_element_bits { u8 traffic_class[0x4]; - u8 reserved_at_4[0xc]; + u8 eswitch_owner_vhca_id_valid[0x1]; + u8 eswitch_owner_vhca_id[0xb]; u8 vport_number[0x10]; }; @@ -4165,7 +4216,9 @@ struct mlx5_ifc_scheduling_context_bits { u8 max_average_bw[0x20]; - u8 reserved_at_e0[0x120]; + u8 max_bw_obj_id[0x20]; + + u8 reserved_at_100[0x100]; }; struct mlx5_ifc_rqtc_bits { @@ -6326,6 +6379,20 @@ struct mlx5_ifc_modify_other_hca_cap_in_bits { struct mlx5_ifc_other_hca_cap_bits other_capability; }; +struct mlx5_ifc_sw_owner_icm_root_params_bits { + u8 sw_owner_icm_root_1[0x40]; + + u8 sw_owner_icm_root_0[0x40]; +}; + +struct mlx5_ifc_rtc_params_bits { + u8 rtc_id_0[0x20]; + + u8 rtc_id_1[0x20]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_flow_table_context_bits { u8 reformat_en[0x1]; u8 decap_en[0x1]; @@ -6344,20 +6411,10 @@ struct mlx5_ifc_flow_table_context_bits { u8 lag_master_next_table_id[0x18]; u8 reserved_at_60[0x60]; - union { - struct { - u8 sw_owner_icm_root_1[0x40]; - - u8 sw_owner_icm_root_0[0x40]; - } sws; - struct { - u8 rtc_id_0[0x20]; - u8 rtc_id_1[0x20]; - - u8 reserved_at_100[0x40]; - - } hws; + union { + struct mlx5_ifc_sw_owner_icm_root_params_bits sws; + struct mlx5_ifc_rtc_params_bits hws; }; }; @@ -7006,6 +7063,7 @@ struct mlx5_ifc_alloc_packet_reformat_context_out_bits { enum { MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START = 0x1, + MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START = 0x2, MLX5_REFORMAT_CONTEXT_ANCHOR_IP_START = 0x7, MLX5_REFORMAT_CONTEXT_ANCHOR_TCP_UDP_START = 0x9, }; @@ -10133,7 +10191,21 @@ struct mlx5_ifc_pplm_reg_bits { u8 fec_override_admin_200g_2x[0x10]; u8 fec_override_admin_100g_1x[0x10]; - u8 reserved_at_260[0x20]; + u8 reserved_at_260[0x60]; + + u8 fec_override_cap_1600g_8x[0x10]; + u8 fec_override_cap_800g_4x[0x10]; + + u8 fec_override_cap_400g_2x[0x10]; + u8 fec_override_cap_200g_1x[0x10]; + + u8 fec_override_admin_1600g_8x[0x10]; + u8 fec_override_admin_800g_4x[0x10]; + + u8 fec_override_admin_400g_2x[0x10]; + u8 fec_override_admin_200g_1x[0x10]; + + u8 reserved_at_340[0x80]; }; struct mlx5_ifc_ppcnt_reg_bits { @@ -10507,7 +10579,9 @@ struct mlx5_ifc_mtutc_reg_bits { }; struct mlx5_ifc_pcam_enhanced_features_bits { - u8 reserved_at_0[0x48]; + u8 reserved_at_0[0x1d]; + u8 fec_200G_per_lane_in_pplm[0x1]; + u8 reserved_at_1e[0x2a]; u8 fec_100G_per_lane_in_pplm[0x1]; u8 reserved_at_49[0x1f]; u8 fec_50G_per_lane_in_pplm[0x1]; @@ -10647,7 +10721,8 @@ struct mlx5_ifc_mcam_access_reg_bits3 { u8 regs_63_to_32[0x20]; - u8 regs_31_to_2[0x1e]; + u8 regs_31_to_3[0x1d]; + u8 mrtcq[0x1]; u8 mtctr[0x1]; u8 mtptm[0x1]; }; @@ -13138,4 +13213,12 @@ struct mlx5_ifc_msees_reg_bits { u8 reserved_at_80[0x180]; }; +struct mlx5_ifc_mrtcq_reg_bits { + u8 reserved_at_0[0x40]; + + u8 rt_clock_identity[0x40]; + + u8 reserved_at_80[0x180]; +}; + #endif /* MLX5_IFC_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index b1c3db9cf355..7b1068ddcbb7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -711,7 +711,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * we don't rely on for anything - the mm_lock_seq read against which we * need ordering is below. 
*/ - if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq)) + if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq.sequence)) return false; if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0)) @@ -728,7 +728,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * after it has been unlocked. * This pairs with RELEASE semantics in vma_end_write_all(). */ - if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) { + if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&vma->vm_mm->mm_lock_seq))) { up_read(&vma->vm_lock->lock); return false; } @@ -743,7 +743,7 @@ static inline void vma_end_read(struct vm_area_struct *vma) } /* WARNING! Can only be used if mmap_lock is expected to be write-locked */ -static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq) +static bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq) { mmap_assert_write_locked(vma->vm_mm); @@ -751,7 +751,7 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq) * current task is holding mmap_write_lock, both vma->vm_lock_seq and * mm->mm_lock_seq can't be concurrently modified. */ - *mm_lock_seq = vma->vm_mm->mm_lock_seq; + *mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence; return (vma->vm_lock_seq == *mm_lock_seq); } @@ -762,7 +762,7 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq) */ static inline void vma_start_write(struct vm_area_struct *vma) { - int mm_lock_seq; + unsigned int mm_lock_seq; if (__is_vma_write_locked(vma, &mm_lock_seq)) return; @@ -780,7 +780,7 @@ static inline void vma_start_write(struct vm_area_struct *vma) static inline void vma_assert_write_locked(struct vm_area_struct *vma) { - int mm_lock_seq; + unsigned int mm_lock_seq; VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma); } @@ -2320,6 +2320,7 @@ extern void pagefault_out_of_memory(void); struct zap_details { struct folio *single_folio; /* Locked folio to be unmapped */ bool even_cows; /* Zap COWed private pages too? */ + bool reclaim_pt; /* Need reclaim page tables? 
*/ zap_flags_t zap_flags; /* Extra flags for zapping */ }; @@ -2991,18 +2992,15 @@ static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; } static inline void ptlock_free(struct ptdesc *ptdesc) {} #endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */ -static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc) +static inline void __pagetable_ctor(struct ptdesc *ptdesc) { struct folio *folio = ptdesc_folio(ptdesc); - if (!ptlock_init(ptdesc)) - return false; __folio_set_pgtable(folio); lruvec_stat_add_folio(folio, NR_PAGETABLE); - return true; } -static inline void pagetable_pte_dtor(struct ptdesc *ptdesc) +static inline void pagetable_dtor(struct ptdesc *ptdesc) { struct folio *folio = ptdesc_folio(ptdesc); @@ -3011,6 +3009,20 @@ static inline void pagetable_pte_dtor(struct ptdesc *ptdesc) lruvec_stat_sub_folio(folio, NR_PAGETABLE); } +static inline void pagetable_dtor_free(struct ptdesc *ptdesc) +{ + pagetable_dtor(ptdesc); + pagetable_free(ptdesc); +} + +static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc) +{ + if (!ptlock_init(ptdesc)) + return false; + __pagetable_ctor(ptdesc); + return true; +} + pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp); static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp) @@ -3087,14 +3099,6 @@ static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) return ptlock_init(ptdesc); } -static inline void pmd_ptlock_free(struct ptdesc *ptdesc) -{ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc)); -#endif - ptlock_free(ptdesc); -} - #define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte) #else @@ -3105,7 +3109,6 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) } static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; } -static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {} #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) @@ -3120,25 +3123,13 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc) { - struct folio *folio = ptdesc_folio(ptdesc); - if (!pmd_ptlock_init(ptdesc)) return false; - __folio_set_pgtable(folio); ptdesc_pmd_pts_init(ptdesc); - lruvec_stat_add_folio(folio, NR_PAGETABLE); + __pagetable_ctor(ptdesc); return true; } -static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc) -{ - struct folio *folio = ptdesc_folio(ptdesc); - - pmd_ptlock_free(ptdesc); - __folio_clear_pgtable(folio); - lruvec_stat_sub_folio(folio, NR_PAGETABLE); -} - /* * No scalability reason to split PUD locks yet, but follow the same pattern * as the PMD locks to make it easier if we decide to. 
The VM should not be @@ -3160,18 +3151,17 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) static inline void pagetable_pud_ctor(struct ptdesc *ptdesc) { - struct folio *folio = ptdesc_folio(ptdesc); - - __folio_set_pgtable(folio); - lruvec_stat_add_folio(folio, NR_PAGETABLE); + __pagetable_ctor(ptdesc); } -static inline void pagetable_pud_dtor(struct ptdesc *ptdesc) +static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc) { - struct folio *folio = ptdesc_folio(ptdesc); + __pagetable_ctor(ptdesc); +} - __folio_clear_pgtable(folio); - lruvec_stat_sub_folio(folio, NR_PAGETABLE); +static inline void pagetable_pgd_ctor(struct ptdesc *ptdesc) +{ + __pagetable_ctor(ptdesc); } extern void __init pagecache_init(void); @@ -3324,6 +3314,8 @@ extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admi extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); extern void exit_mmap(struct mm_struct *); int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift); +bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, bool write); static inline int check_data_rlimit(unsigned long rlim, unsigned long new, @@ -3371,9 +3363,6 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, return __get_unmapped_area(file, addr, len, pgoff, flags, 0); } -extern unsigned long mmap_region(struct file *file, unsigned long addr, - unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, - struct list_head *uf); extern unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, @@ -3431,15 +3420,13 @@ extern vm_fault_t filemap_fault(struct vm_fault *vmf); extern vm_fault_t filemap_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); +extern vm_fault_t filemap_fsnotify_fault(struct vm_fault *vmf); extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ int expand_stack_locked(struct vm_area_struct *vma, unsigned long address); struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr); -/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */ -int expand_downwards(struct vm_area_struct *vma, unsigned long address); - /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, @@ -4096,67 +4083,6 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping, extern int sysctl_nr_trim_pages; -#ifdef CONFIG_PRINTK -void mem_dump_obj(void *object); -#else -static inline void mem_dump_obj(void *object) {} -#endif - -static inline bool is_write_sealed(int seals) -{ - return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE); -} - -/** - * is_readonly_sealed - Checks whether write-sealed but mapped read-only, - * in which case writes should be disallowing moving - * forwards. - * @seals: the seals to check - * @vm_flags: the VMA flags to check - * - * Returns whether readonly sealed, in which case writess should be disallowed - * going forward. 
- */ -static inline bool is_readonly_sealed(int seals, vm_flags_t vm_flags) -{ - /* - * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as - * MAP_SHARED and read-only, take care to not allow mprotect to - * revert protections on such mappings. Do this only for shared - * mappings. For private mappings, don't need to mask - * VM_MAYWRITE as we still want them to be COW-writable. - */ - if (is_write_sealed(seals) && - ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_SHARED)) - return true; - - return false; -} - -/** - * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and - * handle them. - * @seals: the seals to check - * @vma: the vma to operate on - * - * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do proper - * check/handling on the vma flags. Return 0 if check pass, or <0 for errors. - */ -static inline int seal_check_write(int seals, struct vm_area_struct *vma) -{ - if (!is_write_sealed(seals)) - return 0; - - /* - * New PROT_WRITE and MAP_SHARED mmaps are not allowed when - * write seals are active. - */ - if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) - return -EPERM; - - return 0; -} - #ifdef CONFIG_ANON_VMA_NAME int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, unsigned long len_in, diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 1b6a917fffa4..f9157a0c42a5 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -133,31 +133,25 @@ static inline int lru_hist_from_seq(unsigned long seq) return seq % NR_HIST_GENS; } -static inline int lru_tier_from_refs(int refs) +static inline int lru_tier_from_refs(int refs, bool workingset) { VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH)); - /* see the comment in folio_lru_refs() */ - return order_base_2(refs + 1); + /* see the comment on MAX_NR_TIERS */ + return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs); } static inline int folio_lru_refs(struct folio *folio) { unsigned long flags = READ_ONCE(folio->flags); - bool workingset = flags & BIT(PG_workingset); + if (!(flags & BIT(PG_referenced))) + return 0; /* - * Return the number of accesses beyond PG_referenced, i.e., N-1 if the - * total number of accesses is N>1, since N=0,1 both map to the first - * tier. lru_tier_from_refs() will account for this off-by-one. Also see - * the comment on MAX_NR_TIERS. + * Return the total number of accesses including PG_referenced. Also see + * the comment on LRU_REFS_FLAGS. 
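 *
 * Editorial note, not part of the patch: if LRU_REFS_WIDTH were 2 and a
 * folio had PG_referenced plus both LRU_REFS_MASK bits set, this would
 * return 3 + 1 = 4 accesses, which lru_tier_from_refs(4, false) above
 * maps to tier order_base_2(4) = 2.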
*/ - return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset; -} - -static inline void folio_clear_lru_refs(struct folio *folio) -{ - set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); + return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1; } static inline int folio_lru_gen(struct folio *folio) @@ -223,11 +217,43 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *foli VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen)); } +static inline unsigned long lru_gen_folio_seq(struct lruvec *lruvec, struct folio *folio, + bool reclaiming) +{ + int gen; + int type = folio_is_file_lru(folio); + struct lru_gen_folio *lrugen = &lruvec->lrugen; + + /* + * +-----------------------------------+-----------------------------------+ + * | Accessed through page tables and | Accessed through file descriptors | + * | promoted by folio_update_gen() | and protected by folio_inc_gen() | + * +-----------------------------------+-----------------------------------+ + * | PG_active (set while isolated) | | + * +-----------------+-----------------+-----------------+-----------------+ + * | PG_workingset | PG_referenced | PG_workingset | LRU_REFS_FLAGS | + * +-----------------------------------+-----------------------------------+ + * |<---------- MIN_NR_GENS ---------->| | + * |<---------------------------- MAX_NR_GENS ---------------------------->| + */ + if (folio_test_active(folio)) + gen = MIN_NR_GENS - folio_test_workingset(folio); + else if (reclaiming) + gen = MAX_NR_GENS; + else if ((!folio_is_file_lru(folio) && !folio_test_swapcache(folio)) || + (folio_test_reclaim(folio) && + (folio_test_dirty(folio) || folio_test_writeback(folio)))) + gen = MIN_NR_GENS; + else + gen = MAX_NR_GENS - folio_test_workingset(folio); + + return max(READ_ONCE(lrugen->max_seq) - gen + 1, READ_ONCE(lrugen->min_seq[type])); +} + static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming) { unsigned long seq; unsigned long flags; - unsigned long mask; int gen = folio_lru_gen(folio); int type = folio_is_file_lru(folio); int zone = folio_zonenum(folio); @@ -237,40 +263,12 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, if (folio_test_unevictable(folio) || !lrugen->enabled) return false; - /* - * There are four common cases for this page: - * 1. If it's hot, i.e., freshly faulted in, add it to the youngest - * generation, and it's protected over the rest below. - * 2. If it can't be evicted immediately, i.e., a dirty page pending - * writeback, add it to the second youngest generation. - * 3. If it should be evicted first, e.g., cold and clean from - * folio_rotate_reclaimable(), add it to the oldest generation. - * 4. Everything else falls between 2 & 3 above and is added to the - * second oldest generation if it's considered inactive, or the - * oldest generation otherwise. See lru_gen_is_active(). 
- */ - if (folio_test_active(folio)) - seq = lrugen->max_seq; - else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) || - (folio_test_reclaim(folio) && - (folio_test_dirty(folio) || folio_test_writeback(folio)))) - seq = lrugen->max_seq - 1; - else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq) - seq = lrugen->min_seq[type]; - else - seq = lrugen->min_seq[type] + 1; + seq = lru_gen_folio_seq(lruvec, folio, reclaiming); gen = lru_gen_from_seq(seq); flags = (gen + 1UL) << LRU_GEN_PGOFF; /* see the comment on MIN_NR_GENS about PG_active */ - mask = LRU_GEN_MASK; - /* - * Don't clear PG_workingset here because it can affect PSI accounting - * if the activation is due to workingset refault. - */ - if (folio_test_active(folio)) - mask |= LRU_REFS_MASK | BIT(PG_referenced) | BIT(PG_active); - set_mask_bits(&folio->flags, mask, flags); + set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags); lru_gen_update_size(lruvec, folio, -1, gen); /* for folio_rotate_reclaimable() */ @@ -564,9 +562,9 @@ static inline pte_marker copy_pte_marker( * Must be called with pgtable lock held so that no thread will see the none * pte, and if they see it, they'll fault and serialize at the pgtable lock. * - * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled. + * Returns true if an uffd-wp pte was installed, false otherwise. */ -static inline void +static inline bool pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, pte_t *pte, pte_t pteval) { @@ -583,7 +581,7 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, * with a swap pte. There's no way of leaking the bit. */ if (vma_is_anonymous(vma) || !userfaultfd_wp(vma)) - return; + return false; /* A uffd-wp wr-protected normal pte */ if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval))) @@ -596,10 +594,13 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, if (unlikely(pte_swp_uffd_wp_any(pteval))) arm_uffd_pte = true; - if (unlikely(arm_uffd_pte)) + if (unlikely(arm_uffd_pte)) { set_pte_at(vma->vm_mm, addr, pte, make_pte_marker(PTE_MARKER_UFFD_WP)); + return true; + } #endif + return false; } static inline bool vma_has_recency(struct vm_area_struct *vma) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 332cee285662..5f1b2dc788e2 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -438,7 +438,9 @@ FOLIO_MATCH(compound_head, _head_2a); * struct ptdesc - Memory descriptor for page tables. * @__page_flags: Same as page flags. Powerpc only. * @pt_rcu_head: For freeing page table pages. - * @pt_list: List of used page tables. Used for s390 and x86. + * @pt_list: List of used page tables. Used for s390 gmap shadow pages + * (which are not linked into the user page tables) and x86 + * pgds. * @_pt_pad_1: Padding that aliases with page's compound head. * @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs. * @__page_mapping: Aliases with page->mapping. Unused for page tables. @@ -727,7 +729,7 @@ struct vm_area_struct { * counter reuse can only lead to occasional unnecessary use of the * slowpath. */ - int vm_lock_seq; + unsigned int vm_lock_seq; /* Unstable RCU readers are allowed to read this. */ struct vma_lock *vm_lock; #endif @@ -921,6 +923,9 @@ struct mm_struct { * Roughly speaking, incrementing the sequence number is * equivalent to releasing locks on VMAs; reading the sequence * number can be part of taking a read lock on a VMA. 
+ * Incremented every time mmap_lock is write-locked/unlocked. + * Initialized to 0, therefore odd values indicate mmap_lock + * is write-locked and even values that it's released. * * Can be modified under write mmap_lock using RELEASE * semantics. @@ -929,7 +934,7 @@ struct mm_struct { * Can be read with ACQUIRE semantics if not holding write * mmap_lock. */ - int mm_lock_seq; + seqcount_t mm_lock_seq; #endif diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index de9dc20b01ba..45a21faa3ff6 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -71,39 +71,68 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm) } #ifdef CONFIG_PER_VMA_LOCK -/* - * Drop all currently-held per-VMA locks. - * This is called from the mmap_lock implementation directly before releasing - * a write-locked mmap_lock (or downgrading it to read-locked). - * This should normally NOT be called manually from other places. - * If you want to call this manually anyway, keep in mind that this will release - * *all* VMA write locks, including ones from further up the stack. - */ -static inline void vma_end_write_all(struct mm_struct *mm) + +static inline void mm_lock_seqcount_init(struct mm_struct *mm) +{ + seqcount_init(&mm->mm_lock_seq); +} + +static inline void mm_lock_seqcount_begin(struct mm_struct *mm) +{ + do_raw_write_seqcount_begin(&mm->mm_lock_seq); +} + +static inline void mm_lock_seqcount_end(struct mm_struct *mm) +{ + ASSERT_EXCLUSIVE_WRITER(mm->mm_lock_seq); + do_raw_write_seqcount_end(&mm->mm_lock_seq); +} + +static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq) { - mmap_assert_write_locked(mm); /* - * Nobody can concurrently modify mm->mm_lock_seq due to exclusive - * mmap_lock being held. - * We need RELEASE semantics here to ensure that preceding stores into - * the VMA take effect before we unlock it with this store. - * Pairs with ACQUIRE semantics in vma_start_read(). + * Since mmap_lock is a sleeping lock, and waiting for it to become + * unlocked is more or less equivalent with taking it ourselves, don't + * bother with the speculative path if mmap_lock is already write-locked + * and take the slow path, which takes the lock. 
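	 *
	 * Editorial sketch, not part of the patch; the caller shape below
	 * is hypothetical:
	 *
	 *	unsigned int seq;
	 *
	 *	if (mmap_lock_speculate_try_begin(mm, &seq)) {
	 *		/* speculative, lock-free reads of mm state */
	 *		if (mmap_lock_speculate_retry(mm, seq))
	 *			goto slow_path;	/* a writer raced with us */
	 *	}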
*/ - smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1); + return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq); +} + +static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq) +{ + return read_seqcount_retry(&mm->mm_lock_seq, seq); } -#else -static inline void vma_end_write_all(struct mm_struct *mm) {} -#endif + +#else /* CONFIG_PER_VMA_LOCK */ + +static inline void mm_lock_seqcount_init(struct mm_struct *mm) {} +static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {} +static inline void mm_lock_seqcount_end(struct mm_struct *mm) {} + +static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq) +{ + return false; +} + +static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq) +{ + return true; +} + +#endif /* CONFIG_PER_VMA_LOCK */ static inline void mmap_init_lock(struct mm_struct *mm) { init_rwsem(&mm->mmap_lock); + mm_lock_seqcount_init(mm); } static inline void mmap_write_lock(struct mm_struct *mm) { __mmap_lock_trace_start_locking(mm, true); down_write(&mm->mmap_lock); + mm_lock_seqcount_begin(mm); __mmap_lock_trace_acquire_returned(mm, true, true); } @@ -111,6 +140,7 @@ static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass) { __mmap_lock_trace_start_locking(mm, true); down_write_nested(&mm->mmap_lock, subclass); + mm_lock_seqcount_begin(mm); __mmap_lock_trace_acquire_returned(mm, true, true); } @@ -120,10 +150,26 @@ static inline int mmap_write_lock_killable(struct mm_struct *mm) __mmap_lock_trace_start_locking(mm, true); ret = down_write_killable(&mm->mmap_lock); + if (!ret) + mm_lock_seqcount_begin(mm); __mmap_lock_trace_acquire_returned(mm, true, ret == 0); return ret; } +/* + * Drop all currently-held per-VMA locks. + * This is called from the mmap_lock implementation directly before releasing + * a write-locked mmap_lock (or downgrading it to read-locked). + * This should normally NOT be called manually from other places. + * If you want to call this manually anyway, keep in mind that this will release + * *all* VMA write locks, including ones from further up the stack. 
+ */ +static inline void vma_end_write_all(struct mm_struct *mm) +{ + mmap_assert_write_locked(mm); + mm_lock_seqcount_end(mm); +} + static inline void mmap_write_unlock(struct mm_struct *mm) { __mmap_lock_trace_released(mm, true); diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 56972bd78462..01e0f591a20b 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -57,6 +57,7 @@ struct mmc_command { #define MMC_RSP_NONE (0) #define MMC_RSP_R1 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) #define MMC_RSP_R1B (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY) +#define MMC_RSP_R1B_NO_CRC (MMC_RSP_PRESENT|MMC_RSP_OPCODE|MMC_RSP_BUSY) #define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC) #define MMC_RSP_R3 (MMC_RSP_PRESENT) #define MMC_RSP_R4 (MMC_RSP_PRESENT) @@ -64,9 +65,6 @@ struct mmc_command { #define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) #define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) -/* Can be used by core to poll after switch to MMC HS mode */ -#define MMC_RSP_R1_NO_CRC (MMC_RSP_PRESENT|MMC_RSP_OPCODE) - #define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE)) /* diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index f166d6611ddb..68f09a955a90 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -590,6 +590,14 @@ static inline struct mmc_host *mmc_from_priv(void *priv) return container_of(priv, struct mmc_host, private); } +#ifdef CONFIG_MMC_CRYPTO +static inline struct mmc_host * +mmc_from_crypto_profile(struct blk_crypto_profile *profile) +{ + return container_of(profile, struct mmc_host, crypto_profile); +} +#endif + #define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI) #define mmc_dev(x) ((x)->parent) diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index d7cb1e5ecbda..a0a3894900ed 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -9,10 +9,12 @@ struct page; struct vm_area_struct; struct mm_struct; struct vma_iterator; +struct vma_merge_struct; void dump_page(const struct page *page, const char *reason); void dump_vma(const struct vm_area_struct *vma); void dump_mm(const struct mm_struct *mm); +void dump_vmg(const struct vma_merge_struct *vmg, const char *reason); void vma_iter_dump_tree(const struct vma_iterator *vmi); #ifdef CONFIG_DEBUG_VM @@ -87,6 +89,15 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi); } \ unlikely(__ret_warn_once); \ }) +#define VM_WARN_ON_VMG(cond, vmg) ({ \ + int __ret_warn = !!(cond); \ + \ + if (unlikely(__ret_warn)) { \ + dump_vmg(vmg, "VM_WARN_ON_VMG(" __stringify(cond)")"); \ + WARN_ON(1); \ + } \ + unlikely(__ret_warn); \ +}) #define VM_WARN_ON(cond) (void)WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond) @@ -104,9 +115,10 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi); #define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE_MM(cond, mm) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN_ON_VMG(cond, vmg) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) #define VM_WARN(cond, format...) 
BUILD_BUG_ON_INVALID(cond) -#endif +#endif /* CONFIG_DEBUG_VM */ #ifdef CONFIG_DEBUG_VM_IRQSOFF #define VM_WARN_ON_IRQS_ENABLED() WARN_ON_ONCE(!irqs_disabled()) diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h index bbaec80c78c5..ac01dc4eb2ce 100644 --- a/include/linux/mmu_context.h +++ b/include/linux/mmu_context.h @@ -24,6 +24,7 @@ static inline void leave_mm(void) { } #ifndef task_cpu_possible_mask # define task_cpu_possible_mask(p) cpu_possible_mask # define task_cpu_possible(cpu, p) true +# define task_cpu_fallback_mask(p) housekeeping_cpumask(HK_TYPE_TICK) #else # define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p)) #endif diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index b36124145a16..9540b41894da 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -332,66 +332,88 @@ enum lruvec_flags { #endif /* !__GENERATING_BOUNDS_H */ /* - * Evictable pages are divided into multiple generations. The youngest and the + * Evictable folios are divided into multiple generations. The youngest and the * oldest generation numbers, max_seq and min_seq, are monotonically increasing. * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the * corresponding generation. The gen counter in folio->flags stores gen+1 while - * a page is on one of lrugen->folios[]. Otherwise it stores 0. + * a folio is on one of lrugen->folios[]. Otherwise it stores 0. * - * A page is added to the youngest generation on faulting. The aging needs to - * check the accessed bit at least twice before handing this page over to the - * eviction. The first check takes care of the accessed bit set on the initial - * fault; the second check makes sure this page hasn't been used since then. - * This process, AKA second chance, requires a minimum of two generations, - * hence MIN_NR_GENS. And to maintain ABI compatibility with the active/inactive - * LRU, e.g., /proc/vmstat, these two generations are considered active; the - * rest of generations, if they exist, are considered inactive. See - * lru_gen_is_active(). + * After a folio is faulted in, the aging needs to check the accessed bit at + * least twice before handing this folio over to the eviction. The first check + * clears the accessed bit from the initial fault; the second check makes sure + * this folio hasn't been used since then. This process, AKA second chance, + * requires a minimum of two generations, hence MIN_NR_GENS. And to maintain ABI + * compatibility with the active/inactive LRU, e.g., /proc/vmstat, these two + * generations are considered active; the rest of generations, if they exist, + * are considered inactive. See lru_gen_is_active(). * - * PG_active is always cleared while a page is on one of lrugen->folios[] so - * that the aging needs not to worry about it. And it's set again when a page - * considered active is isolated for non-reclaiming purposes, e.g., migration. - * See lru_gen_add_folio() and lru_gen_del_folio(). + * PG_active is always cleared while a folio is on one of lrugen->folios[] so + * that the sliding window needs not to worry about it. And it's set again when + * a folio considered active is isolated for non-reclaiming purposes, e.g., + * migration. See lru_gen_add_folio() and lru_gen_del_folio(). 
* * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the * number of categories of the active/inactive LRU when keeping track of * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits - * in folio->flags. + * in folio->flags, masked by LRU_GEN_MASK. */ #define MIN_NR_GENS 2U #define MAX_NR_GENS 4U /* - * Each generation is divided into multiple tiers. A page accessed N times - * through file descriptors is in tier order_base_2(N). A page in the first tier - * (N=0,1) is marked by PG_referenced unless it was faulted in through page - * tables or read ahead. A page in any other tier (N>1) is marked by - * PG_referenced and PG_workingset. This implies a minimum of two tiers is - * supported without using additional bits in folio->flags. + * Each generation is divided into multiple tiers. A folio accessed N times + * through file descriptors is in tier order_base_2(N). A folio in the first + * tier (N=0,1) is marked by PG_referenced unless it was faulted in through page + * tables or read ahead. A folio in the last tier (MAX_NR_TIERS-1) is marked by + * PG_workingset. A folio in any other tier (1<N<5) between the first and last + * is marked by additional bits of LRU_REFS_WIDTH in folio->flags. * * In contrast to moving across generations which requires the LRU lock, moving * across tiers only involves atomic operations on folio->flags and therefore * has a negligible cost in the buffered access path. In the eviction path, - * comparisons of refaulted/(evicted+protected) from the first tier and the - * rest infer whether pages accessed multiple times through file descriptors - * are statistically hot and thus worth protecting. + * comparisons of refaulted/(evicted+protected) from the first tier and the rest + * infer whether folios accessed multiple times through file descriptors are + * statistically hot and thus worth protecting. * * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the * number of categories of the active/inactive LRU when keeping track of * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in - * folio->flags. + * folio->flags, masked by LRU_REFS_MASK. */ #define MAX_NR_TIERS 4U #ifndef __GENERATING_BOUNDS_H -struct lruvec; -struct page_vma_mapped_walk; - #define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF) #define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF) +/* + * For folios accessed multiple times through file descriptors, + * lru_gen_inc_refs() sets additional bits of LRU_REFS_WIDTH in folio->flags + * after PG_referenced, then PG_workingset after LRU_REFS_WIDTH. After all its + * bits are set, i.e., LRU_REFS_FLAGS|BIT(PG_workingset), a folio is lazily + * promoted into the second oldest generation in the eviction path. And when + * folio_inc_gen() does that, it clears LRU_REFS_FLAGS so that + * lru_gen_inc_refs() can start over. Note that for this case, LRU_REFS_MASK is + * only valid when PG_referenced is set. + * + * For folios accessed multiple times through page tables, folio_update_gen() + * from a page table walk or lru_gen_set_refs() from a rmap walk sets + * PG_referenced after the accessed bit is cleared for the first time. + * Thereafter, those two paths set PG_workingset and promote folios to the + * youngest generation. Like folio_inc_gen(), folio_update_gen() also clears + * PG_referenced. Note that for this case, LRU_REFS_MASK is not used. 
+ * + * For both cases above, after PG_workingset is set on a folio, it remains until + * this folio is either reclaimed, or "deactivated" by lru_gen_clear_refs(). It + * can be set again if lru_gen_test_recent() returns true upon a refault. + */ +#define LRU_REFS_FLAGS (LRU_REFS_MASK | BIT(PG_referenced)) + +struct lruvec; +struct page_vma_mapped_walk; + #ifdef CONFIG_LRU_GEN enum { @@ -406,8 +428,6 @@ enum { NR_LRU_GEN_CAPS }; -#define LRU_REFS_FLAGS (BIT(PG_referenced) | BIT(PG_workingset)) - #define MIN_LRU_BATCH BITS_PER_LONG #define MAX_LRU_BATCH (MIN_LRU_BATCH * 64) @@ -421,12 +441,11 @@ enum { /* * The youngest generation number is stored in max_seq for both anon and file * types as they are aged on an equal footing. The oldest generation numbers are - * stored in min_seq[] separately for anon and file types as clean file pages - * can be evicted regardless of swap constraints. - * - * Normally anon and file min_seq are in sync. But if swapping is constrained, - * e.g., out of swap space, file min_seq is allowed to advance and leave anon - * min_seq behind. + * stored in min_seq[] separately for anon and file types so that they can be + * incremented independently. Ideally min_seq[] are kept in sync when both anon + * and file types are evictable. However, to adapt to situations like extreme + * swappiness, they are allowed to be out of sync by at most + * MAX_NR_GENS-MIN_NR_GENS-1. * * The number of pages in each generation is eventually consistent and therefore * can be transiently negative when reset_batch_size() is pending. @@ -446,8 +465,8 @@ struct lru_gen_folio { unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS]; /* the exponential moving average of evicted+protected */ unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS]; - /* the first tier doesn't need protection, hence the minus one */ - unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1]; + /* can only be modified under the LRU lock */ + unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; /* can be modified without holding the LRU lock */ atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; @@ -498,7 +517,7 @@ struct lru_gen_mm_walk { int mm_stats[NR_MM_STATS]; /* total batched items */ int batched; - bool can_swap; + int swappiness; bool force_scan; }; diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 4338b1b4ac44..d67614f7b7f1 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -700,6 +700,8 @@ struct x86_cpu_id { #define X86_FAMILY_ANY 0 #define X86_MODEL_ANY 0 #define X86_STEPPING_ANY 0 +#define X86_STEP_MIN 0 +#define X86_STEP_MAX 0xf #define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */ /* diff --git a/include/linux/module.h b/include/linux/module.h index b3a643435357..37eb5d88f6eb 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -52,9 +52,9 @@ struct module_kobject { struct module_attribute { struct attribute attr; - ssize_t (*show)(struct module_attribute *, struct module_kobject *, + ssize_t (*show)(const struct module_attribute *, struct module_kobject *, char *); - ssize_t (*store)(struct module_attribute *, struct module_kobject *, + ssize_t (*store)(const struct module_attribute *, struct module_kobject *, const char *, size_t count); void (*setup)(struct module *, const char *); int (*test)(struct module *); @@ -67,10 +67,10 @@ struct module_version_attribute { const char *version; }; 
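/*
 * Editorial sketch, not part of the patch: with the const callbacks
 * above, a module attribute can now live entirely in read-only data.
 * The attribute name "example" and the show() body are hypothetical.
 */
static ssize_t example_show(const struct module_attribute *mattr,
			    struct module_kobject *mk, char *buf)
{
	return sysfs_emit(buf, "%s\n", kobject_name(&mk->kobj));
}

static const struct module_attribute example_attr =
	__ATTR(example, 0444, example_show, NULL);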
-extern ssize_t __modver_version_show(struct module_attribute *, +extern ssize_t __modver_version_show(const struct module_attribute *, struct module_kobject *, char *); -extern struct module_attribute module_uevent; +extern const struct module_attribute module_uevent; /* These are either module local, or the kernel's dummy ones. */ extern int init_module(void); @@ -275,7 +275,7 @@ extern typeof(name) __mod_device_table__##type##__##name \ #else #define MODULE_VERSION(_version) \ MODULE_INFO(version, _version); \ - static struct module_version_attribute __modver_attr \ + static const struct module_version_attribute __modver_attr \ __used __section("__modver") \ __aligned(__alignof__(struct module_version_attribute)) \ = { \ diff --git a/include/linux/mount.h b/include/linux/mount.h index 04213d8ef837..dcc17ce8a959 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -75,7 +75,7 @@ struct vfsmount { static inline struct mnt_idmap *mnt_idmap(const struct vfsmount *mnt) { /* Pairs with smp_store_release() in do_idmap_mount(). */ - return smp_load_acquire(&mnt->mnt_idmap); + return READ_ONCE(mnt->mnt_idmap); } extern int mnt_want_write(struct vfsmount *mnt); @@ -113,7 +113,7 @@ extern struct vfsmount *kern_mount(struct file_system_type *); extern void kern_unmount(struct vfsmount *mnt); extern int may_umount_tree(struct vfsmount *); extern int may_umount(struct vfsmount *); -extern long do_mount(const char *, const char __user *, +int do_mount(const char *, const char __user *, const char *, unsigned long, void *); extern struct vfsmount *collect_mounts(const struct path *); extern void drop_collected_mounts(struct vfsmount *); diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h index 9dd4bf157255..58a2401e4b55 100644 --- a/include/linux/mroute_base.h +++ b/include/linux/mroute_base.h @@ -146,9 +146,9 @@ struct mr_mfc { unsigned long last_assert; int minvif; int maxvif; - unsigned long bytes; - unsigned long pkt; - unsigned long wrong_if; + atomic_long_t bytes; + atomic_long_t pkt; + atomic_long_t wrong_if; unsigned long lastuse; unsigned char ttls[MAXVIFS]; refcount_t refcount; diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h new file mode 100644 index 000000000000..4d9b736ff8b7 --- /dev/null +++ b/include/linux/mtd/nand-qpic-common.h @@ -0,0 +1,478 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * QCOM QPIC common APIs header file + * + * Copyright (c) 2023 Qualcomm Inc. 
+ * Authors: Md sadre Alam <quic_mdalam@quicinc.com> + * + */ +#ifndef __MTD_NAND_QPIC_COMMON_H__ +#define __MTD_NAND_QPIC_COMMON_H__ + +/* NANDc reg offsets */ +#define NAND_FLASH_CMD 0x00 +#define NAND_ADDR0 0x04 +#define NAND_ADDR1 0x08 +#define NAND_FLASH_CHIP_SELECT 0x0c +#define NAND_EXEC_CMD 0x10 +#define NAND_FLASH_STATUS 0x14 +#define NAND_BUFFER_STATUS 0x18 +#define NAND_DEV0_CFG0 0x20 +#define NAND_DEV0_CFG1 0x24 +#define NAND_DEV0_ECC_CFG 0x28 +#define NAND_AUTO_STATUS_EN 0x2c +#define NAND_DEV1_CFG0 0x30 +#define NAND_DEV1_CFG1 0x34 +#define NAND_READ_ID 0x40 +#define NAND_READ_STATUS 0x44 +#define NAND_DEV_CMD0 0xa0 +#define NAND_DEV_CMD1 0xa4 +#define NAND_DEV_CMD2 0xa8 +#define NAND_DEV_CMD_VLD 0xac +#define SFLASHC_BURST_CFG 0xe0 +#define NAND_ERASED_CW_DETECT_CFG 0xe8 +#define NAND_ERASED_CW_DETECT_STATUS 0xec +#define NAND_EBI2_ECC_BUF_CFG 0xf0 +#define FLASH_BUF_ACC 0x100 + +#define NAND_CTRL 0xf00 +#define NAND_VERSION 0xf08 +#define NAND_READ_LOCATION_0 0xf20 +#define NAND_READ_LOCATION_1 0xf24 +#define NAND_READ_LOCATION_2 0xf28 +#define NAND_READ_LOCATION_3 0xf2c +#define NAND_READ_LOCATION_LAST_CW_0 0xf40 +#define NAND_READ_LOCATION_LAST_CW_1 0xf44 +#define NAND_READ_LOCATION_LAST_CW_2 0xf48 +#define NAND_READ_LOCATION_LAST_CW_3 0xf4c + +/* dummy register offsets, used by qcom_write_reg_dma */ +#define NAND_DEV_CMD1_RESTORE 0xdead +#define NAND_DEV_CMD_VLD_RESTORE 0xbeef + +/* NAND_FLASH_CMD bits */ +#define PAGE_ACC BIT(4) +#define LAST_PAGE BIT(5) + +/* NAND_FLASH_CHIP_SELECT bits */ +#define NAND_DEV_SEL 0 +#define DM_EN BIT(2) + +/* NAND_FLASH_STATUS bits */ +#define FS_OP_ERR BIT(4) +#define FS_READY_BSY_N BIT(5) +#define FS_MPU_ERR BIT(8) +#define FS_DEVICE_STS_ERR BIT(16) +#define FS_DEVICE_WP BIT(23) + +/* NAND_BUFFER_STATUS bits */ +#define BS_UNCORRECTABLE_BIT BIT(8) +#define BS_CORRECTABLE_ERR_MSK 0x1f + +/* NAND_DEVn_CFG0 bits */ +#define DISABLE_STATUS_AFTER_WRITE BIT(4) +#define CW_PER_PAGE 6 +#define CW_PER_PAGE_MASK GENMASK(8, 6) +#define UD_SIZE_BYTES 9 +#define UD_SIZE_BYTES_MASK GENMASK(18, 9) +#define ECC_PARITY_SIZE_BYTES_RS GENMASK(22, 19) +#define SPARE_SIZE_BYTES 23 +#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23) +#define NUM_ADDR_CYCLES 27 +#define NUM_ADDR_CYCLES_MASK GENMASK(29, 27) +#define STATUS_BFR_READ BIT(30) +#define SET_RD_MODE_AFTER_STATUS BIT(31) + +/* NAND_DEVn_CFG0 bits */ +#define DEV0_CFG1_ECC_DISABLE BIT(0) +#define WIDE_FLASH BIT(1) +#define NAND_RECOVERY_CYCLES 2 +#define NAND_RECOVERY_CYCLES_MASK GENMASK(4, 2) +#define CS_ACTIVE_BSY BIT(5) +#define BAD_BLOCK_BYTE_NUM 6 +#define BAD_BLOCK_BYTE_NUM_MASK GENMASK(15, 6) +#define BAD_BLOCK_IN_SPARE_AREA BIT(16) +#define WR_RD_BSY_GAP 17 +#define WR_RD_BSY_GAP_MASK GENMASK(22, 17) +#define ENABLE_BCH_ECC BIT(27) + +/* NAND_DEV0_ECC_CFG bits */ +#define ECC_CFG_ECC_DISABLE BIT(0) +#define ECC_SW_RESET BIT(1) +#define ECC_MODE 4 +#define ECC_MODE_MASK GENMASK(5, 4) +#define ECC_PARITY_SIZE_BYTES_BCH 8 +#define ECC_PARITY_SIZE_BYTES_BCH_MASK GENMASK(12, 8) +#define ECC_NUM_DATA_BYTES 16 +#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16) +#define ECC_FORCE_CLK_OPEN BIT(30) + +/* NAND_DEV_CMD1 bits */ +#define READ_ADDR 0 + +/* NAND_DEV_CMD_VLD bits */ +#define READ_START_VLD BIT(0) +#define READ_STOP_VLD BIT(1) +#define WRITE_START_VLD BIT(2) +#define ERASE_START_VLD BIT(3) +#define SEQ_READ_START_VLD BIT(4) + +/* NAND_EBI2_ECC_BUF_CFG bits */ +#define NUM_STEPS 0 + +/* NAND_ERASED_CW_DETECT_CFG bits */ +#define ERASED_CW_ECC_MASK 1 +#define AUTO_DETECT_RES 0 +#define MASK_ECC 
BIT(ERASED_CW_ECC_MASK) +#define RESET_ERASED_DET BIT(AUTO_DETECT_RES) +#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES) +#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC) +#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC) + +/* NAND_ERASED_CW_DETECT_STATUS bits */ +#define PAGE_ALL_ERASED BIT(7) +#define CODEWORD_ALL_ERASED BIT(6) +#define PAGE_ERASED BIT(5) +#define CODEWORD_ERASED BIT(4) +#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED) +#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED) + +/* NAND_READ_LOCATION_n bits */ +#define READ_LOCATION_OFFSET 0 +#define READ_LOCATION_SIZE 16 +#define READ_LOCATION_LAST 31 + +/* Version Mask */ +#define NAND_VERSION_MAJOR_MASK 0xf0000000 +#define NAND_VERSION_MAJOR_SHIFT 28 +#define NAND_VERSION_MINOR_MASK 0x0fff0000 +#define NAND_VERSION_MINOR_SHIFT 16 + +/* NAND OP_CMDs */ +#define OP_PAGE_READ 0x2 +#define OP_PAGE_READ_WITH_ECC 0x3 +#define OP_PAGE_READ_WITH_ECC_SPARE 0x4 +#define OP_PAGE_READ_ONFI_READ 0x5 +#define OP_PROGRAM_PAGE 0x6 +#define OP_PAGE_PROGRAM_WITH_ECC 0x7 +#define OP_PROGRAM_PAGE_SPARE 0x9 +#define OP_BLOCK_ERASE 0xa +#define OP_CHECK_STATUS 0xc +#define OP_FETCH_ID 0xb +#define OP_RESET_DEVICE 0xd + +/* Default Value for NAND_DEV_CMD_VLD */ +#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \ + ERASE_START_VLD | SEQ_READ_START_VLD) + +/* NAND_CTRL bits */ +#define BAM_MODE_EN BIT(0) + +/* + * the NAND controller performs reads/writes with ECC in 516 byte chunks. + * the driver calls the chunks 'step' or 'codeword' interchangeably + */ +#define NANDC_STEP_SIZE 512 + +/* + * the largest page size we support is 8K, this will have 16 steps/codewords + * of 512 bytes each + */ +#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE) + +/* we read at most 3 registers per codeword scan */ +#define MAX_REG_RD (3 * MAX_NUM_STEPS) + +/* ECC modes supported by the controller */ +#define ECC_NONE BIT(0) +#define ECC_RS_4BIT BIT(1) +#define ECC_BCH_4BIT BIT(2) +#define ECC_BCH_8BIT BIT(3) + +/* + * Returns the actual register address for all NAND_DEV_ registers + * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD) + */ +#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg)) + +/* Returns the NAND register physical address */ +#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset)) + +/* Returns the dma address for reg read buffer */ +#define reg_buf_dma_addr(chip, vaddr) \ + ((chip)->reg_read_dma + \ + ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf)) + +#define QPIC_PER_CW_CMD_ELEMENTS 32 +#define QPIC_PER_CW_CMD_SGL 32 +#define QPIC_PER_CW_DATA_SGL 8 + +#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000) + +/* + * Flags used in DMA descriptor preparation helper functions + * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma) + */ +/* Don't set the EOT in current tx BAM sgl */ +#define NAND_BAM_NO_EOT BIT(0) +/* Set the NWD flag in current BAM sgl */ +#define NAND_BAM_NWD BIT(1) +/* Finish writing in the current BAM sgl and start writing in another BAM sgl */ +#define NAND_BAM_NEXT_SGL BIT(2) +/* + * Erased codeword status is being used two times in single transfer so this + * flag will determine the current value of erased codeword status register + */ +#define NAND_ERASED_CW_SET BIT(4) + +#define MAX_ADDRESS_CYCLE 5 + +/* + * This data type corresponds to the BAM transaction which will be used for all + * NAND transfers. 
+ * @bam_ce - the array of BAM command elements + * @cmd_sgl - sgl for NAND BAM command pipe + * @data_sgl - sgl for NAND BAM consumer/producer pipe + * @last_data_desc - last DMA desc in data channel (tx/rx). + * @last_cmd_desc - last DMA desc in command channel. + * @txn_done - completion for NAND transfer. + * @bam_ce_pos - the index in bam_ce which is available for next sgl + * @bam_ce_start - the index in bam_ce which marks the start position ce + * for current sgl. It will be used for size calculation + * for current sgl + * @cmd_sgl_pos - current index in command sgl. + * @cmd_sgl_start - start index in command sgl. + * @tx_sgl_pos - current index in data sgl for tx. + * @tx_sgl_start - start index in data sgl for tx. + * @rx_sgl_pos - current index in data sgl for rx. + * @rx_sgl_start - start index in data sgl for rx. + */ +struct bam_transaction { + struct bam_cmd_element *bam_ce; + struct scatterlist *cmd_sgl; + struct scatterlist *data_sgl; + struct dma_async_tx_descriptor *last_data_desc; + struct dma_async_tx_descriptor *last_cmd_desc; + struct completion txn_done; + struct_group(bam_positions, + u32 bam_ce_pos; + u32 bam_ce_start; + u32 cmd_sgl_pos; + u32 cmd_sgl_start; + u32 tx_sgl_pos; + u32 tx_sgl_start; + u32 rx_sgl_pos; + u32 rx_sgl_start; + + ); +}; + +/* + * This data type corresponds to the nand dma descriptor + * @dma_desc - low level DMA engine descriptor + * @list - list for desc_info + * + * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by + * ADM + * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM + * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM + * @dir - DMA transfer direction + */ +struct desc_info { + struct dma_async_tx_descriptor *dma_desc; + struct list_head node; + + union { + struct scatterlist adm_sgl; + struct { + struct scatterlist *bam_sgl; + int sgl_cnt; + }; + }; + enum dma_data_direction dir; +}; + +/* + * holds the current register values that we want to write. acts as a contiguous + * chunk of memory which we use to write the controller registers through DMA. + */ +struct nandc_regs { + __le32 cmd; + __le32 addr0; + __le32 addr1; + __le32 chip_sel; + __le32 exec; + + __le32 cfg0; + __le32 cfg1; + __le32 ecc_bch_cfg; + + __le32 clrflashstatus; + __le32 clrreadstatus; + + __le32 cmd1; + __le32 vld; + + __le32 orig_cmd1; + __le32 orig_vld; + + __le32 ecc_buf_cfg; + __le32 read_location0; + __le32 read_location1; + __le32 read_location2; + __le32 read_location3; + __le32 read_location_last0; + __le32 read_location_last1; + __le32 read_location_last2; + __le32 read_location_last3; + + __le32 erased_cw_detect_cfg_clr; + __le32 erased_cw_detect_cfg_set; +}; + +/* + * NAND controller data struct + * + * @dev: parent device + * + * @base: MMIO base + * + * @core_clk: controller clock + * @aon_clk: another controller clock + * + * @regs: a contiguous chunk of memory for DMA register + * writes. 
contains the register values to be + * written to controller + * + * @props: properties of current NAND controller, + * initialized via DT match data + * + * @controller: base controller structure + * @host_list: list containing all the chips attached to the + * controller + * + * @chan: dma channel + * @cmd_crci: ADM DMA CRCI for command flow control + * @data_crci: ADM DMA CRCI for data flow control + * + * @desc_list: DMA descriptor list (list of desc_infos) + * + * @data_buffer: our local DMA buffer for page read/writes, + * used when we can't use the buffer provided + * by upper layers directly + * @reg_read_buf: local buffer for reading back registers via DMA + * + * @base_phys: physical base address of controller registers + * @base_dma: dma base address of controller registers + * @reg_read_dma: contains dma address for register read buffer + * + * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf + * functions + * @max_cwperpage: maximum QPIC codewords required. calculated + * from all connected NAND devices pagesize + * + * @reg_read_pos: marker for data read in reg_read_buf + * + * @cmd1/vld: some fixed controller register values + * + * @exec_opwrite: flag to select correct number of code word + * while reading status + */ +struct qcom_nand_controller { + struct device *dev; + + void __iomem *base; + + struct clk *core_clk; + struct clk *aon_clk; + + struct nandc_regs *regs; + struct bam_transaction *bam_txn; + + const struct qcom_nandc_props *props; + + struct nand_controller *controller; + struct list_head host_list; + + union { + /* will be used only by QPIC for BAM DMA */ + struct { + struct dma_chan *tx_chan; + struct dma_chan *rx_chan; + struct dma_chan *cmd_chan; + }; + + /* will be used only by EBI2 for ADM DMA */ + struct { + struct dma_chan *chan; + unsigned int cmd_crci; + unsigned int data_crci; + }; + }; + + struct list_head desc_list; + + u8 *data_buffer; + __le32 *reg_read_buf; + + phys_addr_t base_phys; + dma_addr_t base_dma; + dma_addr_t reg_read_dma; + + int buf_size; + int buf_count; + int buf_start; + unsigned int max_cwperpage; + + int reg_read_pos; + + u32 cmd1, vld; + bool exec_opwrite; +}; + +/* + * This data type corresponds to the NAND controller properties which varies + * among different NAND controllers. 
+ * @ecc_modes - ecc mode for NAND + * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset + * @supports_bam - whether NAND controller is using BAM + * @nandc_part_of_qpic - whether NAND controller is part of qpic IP + * @qpic_version2 - flag to indicate QPIC IP version 2 + * @use_codeword_fixup - whether NAND has different layout for boot partitions + */ +struct qcom_nandc_props { + u32 ecc_modes; + u32 dev_cmd_reg_start; + bool supports_bam; + bool nandc_part_of_qpic; + bool qpic_version2; + bool use_codeword_fixup; +}; + +void qcom_free_bam_transaction(struct qcom_nand_controller *nandc); +struct bam_transaction *qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc); +void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc); +void qcom_qpic_bam_dma_done(void *data); +void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu); +int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc, + struct dma_chan *chan, unsigned long flags); +int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, + int reg_off, const void *vaddr, int size, unsigned int flags); +int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read, + const void *vaddr, int size, unsigned int flags); +int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, int reg_off, + const void *vaddr, int size, bool flow_control); +int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, int num_regs, + unsigned int flags); +int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first, + int num_regs, unsigned int flags); +int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr, + int size, unsigned int flags); +int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr, + int size, unsigned int flags); +int qcom_submit_descs(struct qcom_nand_controller *nandc); +void qcom_clear_read_regs(struct qcom_nand_controller *nandc); +void qcom_nandc_unalloc(struct qcom_nand_controller *nandc); +int qcom_nandc_alloc(struct qcom_nand_controller *nandc); +#endif + diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 702e5fb13dae..0da8a1c7740e 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -62,18 +62,38 @@ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) -#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \ - SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \ +#define SPINAND_PAGE_READ_FROM_CACHE_OP(addr, ndummy, buf, len, ...) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 1), \ + __VA_OPT__(SPI_MEM_OP_MAX_FREQ(__VA_ARGS__))) + +#define SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \ + SPI_MEM_OP_ADDR(2, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 1)) + +#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 1)) -#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \ - SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 
0x0b : 0x03, 1), \ +#define SPINAND_PAGE_READ_FROM_CACHE_FAST_OP_3A(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 1)) +#define SPINAND_PAGE_READ_FROM_CACHE_DTR_OP(addr, ndummy, buf, len, freq) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x0d, 1), \ + SPI_MEM_DTR_OP_ADDR(2, addr, 1), \ + SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \ + SPI_MEM_DTR_OP_DATA_IN(len, buf, 1), \ + SPI_MEM_OP_MAX_FREQ(freq)) + #define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ @@ -86,6 +106,13 @@ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 2)) +#define SPINAND_PAGE_READ_FROM_CACHE_X2_DTR_OP(addr, ndummy, buf, len, freq) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x3d, 1), \ + SPI_MEM_DTR_OP_ADDR(2, addr, 1), \ + SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \ + SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \ + SPI_MEM_OP_MAX_FREQ(freq)) + #define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ @@ -98,6 +125,13 @@ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 4)) +#define SPINAND_PAGE_READ_FROM_CACHE_X4_DTR_OP(addr, ndummy, buf, len, freq) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x6d, 1), \ + SPI_MEM_DTR_OP_ADDR(2, addr, 1), \ + SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \ + SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \ + SPI_MEM_OP_MAX_FREQ(freq)) + #define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ SPI_MEM_OP_ADDR(2, addr, 2), \ @@ -110,6 +144,13 @@ SPI_MEM_OP_DUMMY(ndummy, 2), \ SPI_MEM_OP_DATA_IN(len, buf, 2)) +#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_DTR_OP(addr, ndummy, buf, len, freq) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xbd, 1), \ + SPI_MEM_DTR_OP_ADDR(2, addr, 2), \ + SPI_MEM_DTR_OP_DUMMY(ndummy, 2), \ + SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \ + SPI_MEM_OP_MAX_FREQ(freq)) + #define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ SPI_MEM_OP_ADDR(2, addr, 4), \ @@ -122,6 +163,13 @@ SPI_MEM_OP_DUMMY(ndummy, 4), \ SPI_MEM_OP_DATA_IN(len, buf, 4)) +#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_DTR_OP(addr, ndummy, buf, len, freq) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xed, 1), \ + SPI_MEM_DTR_OP_ADDR(2, addr, 4), \ + SPI_MEM_DTR_OP_DUMMY(ndummy, 4), \ + SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \ + SPI_MEM_OP_MAX_FREQ(freq)) + #define SPINAND_PROG_EXEC_OP(addr) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ @@ -268,6 +316,7 @@ extern const struct spinand_manufacturer gigadevice_spinand_manufacturer; extern const struct spinand_manufacturer macronix_spinand_manufacturer; extern const struct spinand_manufacturer micron_spinand_manufacturer; extern const struct spinand_manufacturer paragon_spinand_manufacturer; +extern const struct spinand_manufacturer skyhigh_spinand_manufacturer; extern const struct spinand_manufacturer toshiba_spinand_manufacturer; extern const struct spinand_manufacturer winbond_spinand_manufacturer; extern const struct spinand_manufacturer xtx_spinand_manufacturer; @@ -314,6 +363,7 @@ struct spinand_ecc_info { #define SPINAND_HAS_CR_FEAT_BIT BIT(1) #define SPINAND_HAS_PROG_PLANE_SELECT_BIT BIT(2) #define SPINAND_HAS_READ_PLANE_SELECT_BIT BIT(3) +#define SPINAND_NO_RAW_ACCESS BIT(4) /** * struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure diff --git a/include/linux/net.h b/include/linux/net.h index b75bc534c1b3..0ff950eecc6b 100644 
--- a/include/linux/net.h +++ b/include/linux/net.h @@ -343,8 +343,6 @@ static inline bool sendpages_ok(struct page *page, size_t len, size_t offset) int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t len); -int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, - struct kvec *vec, size_t num, size_t len); int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t len, int flags); diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h index 662074b08c94..ff0758e88ea1 100644 --- a/include/linux/net_tstamp.h +++ b/include/linux/net_tstamp.h @@ -20,6 +20,33 @@ enum hwtstamp_source { }; /** + * struct hwtstamp_provider_desc - hwtstamp provider description + * + * @index: index of the hwtstamp provider. + * @qualifier: hwtstamp provider qualifier. + */ +struct hwtstamp_provider_desc { + int index; + enum hwtstamp_provider_qualifier qualifier; +}; + +/** + * struct hwtstamp_provider - hwtstamp provider object + * + * @rcu_head: RCU callback used to free the struct. + * @source: source of the hwtstamp provider. + * @phydev: pointer of the phydev source in case a PTP coming from phylib + * @desc: hwtstamp provider description. + */ + +struct hwtstamp_provider { + struct rcu_head rcu_head; + enum hwtstamp_source source; + struct phy_device *phydev; + struct hwtstamp_provider_desc desc; +}; + +/** * struct kernel_hwtstamp_config - Kernel copy of struct hwtstamp_config * * @flags: see struct hwtstamp_config @@ -31,6 +58,7 @@ enum hwtstamp_source { * copied the ioctl request back to user space * @source: indication whether timestamps should come from the netdev or from * an attached phylib PHY + * @qualifier: qualifier of the hwtstamp provider * * Prefer using this structure for in-kernel processing of hardware * timestamping configuration, over the inextensible struct hwtstamp_config @@ -43,6 +71,7 @@ struct kernel_hwtstamp_config { struct ifreq *ifr; bool copied_to_user; enum hwtstamp_source source; + enum hwtstamp_provider_qualifier qualifier; }; static inline void hwtstamp_config_to_kernel(struct kernel_hwtstamp_config *kernel_cfg, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ecc686409161..8da4c61f97b9 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -63,6 +63,7 @@ struct dsa_port; struct ip_tunnel_parm_kern; struct macsec_context; struct macsec_ops; +struct netdev_config; struct netdev_name_node; struct sd_flow_limit; struct sfp_bus; @@ -82,6 +83,7 @@ struct xdp_metadata_ops; struct xdp_md; struct ethtool_netdev_state; struct phy_link_topology; +struct hwtstamp_provider; typedef u32 xdp_features_t; @@ -381,8 +383,9 @@ struct napi_struct { struct sk_buff *skb; struct list_head rx_list; /* Pending GRO_NORMAL skbs */ int rx_count; /* length of rx_list */ - unsigned int napi_id; + unsigned int napi_id; /* protected by netdev_lock */ struct hrtimer timer; + /* all fields past this point are write-protected by netdev_lock */ struct task_struct *thread; unsigned long gro_flush_timeout; unsigned long irq_suspend_timeout; @@ -509,7 +512,7 @@ static inline bool napi_prefer_busy_poll(struct napi_struct *n) * is scheduled for example in the context of delayed timer * that can be skipped if a NAPI is already scheduled. * - * Return True if NAPI is scheduled, False otherwise. + * Return: True if NAPI is scheduled, False otherwise. 
*/ static inline bool napi_is_scheduled(struct napi_struct *n) { @@ -524,7 +527,7 @@ bool napi_schedule_prep(struct napi_struct *n); * * Schedule NAPI poll routine to be called if it is not already * running. - * Return true if we schedule a NAPI or false if not. + * Return: true if we schedule a NAPI or false if not. * Refer to napi_schedule_prep() for additional reason on why * a NAPI might not be scheduled. */ @@ -558,7 +561,7 @@ static inline void napi_schedule_irqoff(struct napi_struct *n) * Mark NAPI processing as complete. Should only be called if poll budget * has not been completely consumed. * Prefer over napi_complete(). - * Return false if device should avoid rearming interrupts. + * Return: false if device should avoid rearming interrupts. */ bool napi_complete_done(struct napi_struct *n, int work_done); @@ -569,16 +572,11 @@ static inline bool napi_complete(struct napi_struct *n) int dev_set_threaded(struct net_device *dev, bool threaded); -/** - * napi_disable - prevent NAPI from scheduling - * @n: NAPI context - * - * Stop NAPI from being scheduled on this context. - * Waits till any outstanding processing completes. - */ void napi_disable(struct napi_struct *n); +void napi_disable_locked(struct napi_struct *n); void napi_enable(struct napi_struct *n); +void napi_enable_locked(struct napi_struct *n); /** * napi_synchronize - wait until NAPI is not running @@ -2045,6 +2043,7 @@ enum netdev_reg_state { * * @neighbours: List heads pointing to this device's neighbours' * dev_list, one per address-family. + * @hwprov: Tracks which PTP performs hardware packet time stamping. * * FIXME: cleanup struct net_device such that network protocol info * moves out. @@ -2259,7 +2258,7 @@ struct net_device { void *atalk_ptr; #endif #if IS_ENABLED(CONFIG_AX25) - void *ax25_ptr; + struct ax25_dev __rcu *ax25_ptr; #endif #if IS_ENABLED(CONFIG_CFG80211) struct wireless_dev *ieee80211_ptr; @@ -2412,6 +2411,14 @@ struct net_device { const struct udp_tunnel_nic_info *udp_tunnel_nic_info; struct udp_tunnel_nic *udp_tunnel_nic; + /** @cfg: net_device queue-related configuration */ + struct netdev_config *cfg; + /** + * @cfg_pending: same as @cfg but when device is being actively + * reconfigured includes any changes to the configuration + * requested by the user, but which may or may not be rejected. + */ + struct netdev_config *cfg_pending; struct ethtool_netdev_state *ethtool; /* protected by rtnl_lock */ @@ -2442,8 +2449,27 @@ struct net_device { u32 napi_defer_hard_irqs; /** - * @lock: protects @net_shaper_hierarchy, feel free to use for other - * netdev-scope protection. Ordering: take after rtnl_lock. + * @up: copy of @state's IFF_UP, but safe to read with just @lock. + * May report false negatives while the device is being opened + * or closed (@lock does not protect .ndo_open, or .ndo_close). + */ + bool up; + + /** + * @lock: netdev-scope lock, protects a small selection of fields. + * Should always be taken using netdev_lock() / netdev_unlock() helpers. + * Drivers are free to use it for other protection. + * + * Protects: + * @gro_flush_timeout, @napi_defer_hard_irqs, @napi_list, + * @net_shaper_hierarchy, @reg_state, @threaded + * + * Partially protects (writers must hold both @lock and rtnl_lock): + * @up + * + * Also protects some fields in struct napi_struct. + * + * Ordering: take after rtnl_lock. 
*/ struct mutex lock; @@ -2457,6 +2483,8 @@ struct net_device { struct hlist_head neighbours[NEIGH_NR_TABLES]; + struct hwtstamp_provider __rcu *hwprov; + u8 priv[] ____cacheline_aligned __counted_by(priv_len); } ____cacheline_aligned; @@ -2667,18 +2695,58 @@ void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index, enum netdev_queue_type type, struct napi_struct *napi); -static inline void netif_napi_set_irq(struct napi_struct *napi, int irq) +static inline void netdev_lock(struct net_device *dev) +{ + mutex_lock(&dev->lock); +} + +static inline void netdev_unlock(struct net_device *dev) +{ + mutex_unlock(&dev->lock); +} + +static inline void netdev_assert_locked(struct net_device *dev) +{ + lockdep_assert_held(&dev->lock); +} + +static inline void netdev_assert_locked_or_invisible(struct net_device *dev) +{ + if (dev->reg_state == NETREG_REGISTERED || + dev->reg_state == NETREG_UNREGISTERING) + netdev_assert_locked(dev); +} + +static inline void netif_napi_set_irq_locked(struct napi_struct *napi, int irq) { napi->irq = irq; } +static inline void netif_napi_set_irq(struct napi_struct *napi, int irq) +{ + netdev_lock(napi->dev); + netif_napi_set_irq_locked(napi, irq); + netdev_unlock(napi->dev); +} + /* Default NAPI poll() weight * Device drivers are strongly advised to not use bigger value */ #define NAPI_POLL_WEIGHT 64 -void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, - int (*poll)(struct napi_struct *, int), int weight); +void netif_napi_add_weight_locked(struct net_device *dev, + struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), + int weight); + +static inline void +netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + netdev_lock(dev); + netif_napi_add_weight_locked(dev, napi, poll, weight); + netdev_unlock(dev); +} /** * netif_napi_add() - initialize a NAPI context @@ -2697,6 +2765,13 @@ netif_napi_add(struct net_device *dev, struct napi_struct *napi, } static inline void +netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int)) +{ + netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT); +} + +static inline void netif_napi_add_tx_weight(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), @@ -2706,6 +2781,15 @@ netif_napi_add_tx_weight(struct net_device *dev, netif_napi_add_weight(dev, napi, poll, weight); } +static inline void +netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int index) +{ + napi->index = index; + napi->config = &dev->napi_config[index]; + netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT); +} + /** * netif_napi_add_config - initialize a NAPI context with persistent config * @dev: network device @@ -2717,9 +2801,9 @@ static inline void netif_napi_add_config(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), int index) { - napi->index = index; - napi->config = &dev->napi_config[index]; - netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); + netdev_lock(dev); + netif_napi_add_config_locked(dev, napi, poll, index); + netdev_unlock(dev); } /** @@ -2739,6 +2823,8 @@ static inline void netif_napi_add_tx(struct net_device *dev, netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT); } +void __netif_napi_del_locked(struct napi_struct *napi); + /** * __netif_napi_del - remove a NAPI context * @napi: 
NAPI context @@ -2747,7 +2833,18 @@ static inline void netif_napi_add_tx(struct net_device *dev, * containing @napi. Drivers might want to call this helper to combine * all the needed RCU grace periods into a single one. */ -void __netif_napi_del(struct napi_struct *napi); +static inline void __netif_napi_del(struct napi_struct *napi) +{ + netdev_lock(napi->dev); + __netif_napi_del_locked(napi); + netdev_unlock(napi->dev); +} + +static inline void netif_napi_del_locked(struct napi_struct *napi) +{ + __netif_napi_del_locked(napi); + synchronize_net(); +} /** * netif_napi_del - remove a NAPI context @@ -2854,6 +2951,46 @@ static inline void dev_lstats_add(struct net_device *dev, unsigned int len) u64_stats_update_end(&lstats->syncp); } +static inline void dev_dstats_rx_add(struct net_device *dev, + unsigned int len) +{ + struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); + + u64_stats_update_begin(&dstats->syncp); + u64_stats_inc(&dstats->rx_packets); + u64_stats_add(&dstats->rx_bytes, len); + u64_stats_update_end(&dstats->syncp); +} + +static inline void dev_dstats_rx_dropped(struct net_device *dev) +{ + struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); + + u64_stats_update_begin(&dstats->syncp); + u64_stats_inc(&dstats->rx_drops); + u64_stats_update_end(&dstats->syncp); +} + +static inline void dev_dstats_tx_add(struct net_device *dev, + unsigned int len) +{ + struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); + + u64_stats_update_begin(&dstats->syncp); + u64_stats_inc(&dstats->tx_packets); + u64_stats_add(&dstats->tx_bytes, len); + u64_stats_update_end(&dstats->syncp); +} + +static inline void dev_dstats_tx_dropped(struct net_device *dev) +{ + struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); + + u64_stats_update_begin(&dstats->syncp); + u64_stats_inc(&dstats->tx_drops); + u64_stats_update_end(&dstats->syncp); +} + #define __netdev_alloc_pcpu_stats(type, gfp) \ ({ \ typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ @@ -3194,7 +3331,6 @@ static inline void unregister_netdevice(struct net_device *dev) int netdev_refcnt_read(const struct net_device *dev); void free_netdev(struct net_device *dev); -void init_dummy_netdev(struct net_device *dev); struct net_device *netdev_get_xmit_slave(struct net_device *dev, struct sk_buff *skb, @@ -3208,7 +3344,6 @@ struct net_device *netdev_get_by_index(struct net *net, int ifindex, struct net_device *netdev_get_by_name(struct net *net, const char *name, netdevice_tracker *tracker, gfp_t gfp); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); -struct net_device *dev_get_by_napi_id(unsigned int napi_id); void netdev_copy_name(struct net_device *dev, char *name); static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, @@ -3322,6 +3457,7 @@ struct softnet_data { }; DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); +DECLARE_PER_CPU(struct page_pool *, system_page_pool); #ifndef CONFIG_PREEMPT_RT static inline int dev_recursion_level(void) @@ -3810,7 +3946,7 @@ static inline bool netif_attr_test_mask(unsigned long j, * @online_mask: bitmask for CPUs/Rx queues that are online * @nr_bits: number of bits in the bitmask * - * Returns true if a CPU/Rx queue is online. + * Returns: true if a CPU/Rx queue is online. 
*/ static inline bool netif_attr_test_online(unsigned long j, const unsigned long *online_mask, @@ -3830,7 +3966,8 @@ static inline bool netif_attr_test_online(unsigned long j, * @srcp: the cpumask/Rx queue mask pointer * @nr_bits: number of bits in the bitmask * - * Returns >= nr_bits if no further CPUs/Rx queues set. + * Returns: next (after n) CPU/Rx queue index in the mask; + * >= nr_bits if no further CPUs/Rx queues set. */ static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, unsigned int nr_bits) @@ -3852,7 +3989,8 @@ static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, * @src2p: the second CPUs/Rx queues mask pointer * @nr_bits: number of bits in the bitmask * - * Returns >= nr_bits if no further CPUs/Rx queues set in both. + * Returns: next (after n) CPU/Rx queue index set in both masks; + * >= nr_bits if no further CPUs/Rx queues set in both. */ static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, const unsigned long *src2p, @@ -3958,9 +4096,9 @@ static inline void dev_consume_skb_any(struct sk_buff *skb) } u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, - struct bpf_prog *xdp_prog); -void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); -int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb); + const struct bpf_prog *xdp_prog); +void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog); +int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb); int netif_rx(struct sk_buff *skb); int __netif_rx(struct sk_buff *skb); @@ -4037,6 +4175,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); u8 dev_xdp_prog_count(struct net_device *dev); int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf); +u8 dev_xdp_sb_prog_count(struct net_device *dev); u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); u32 dev_get_min_mp_channel_count(const struct net_device *dev); @@ -4248,7 +4387,7 @@ static inline bool netif_carrier_ok(const struct net_device *dev) unsigned long dev_trans_start(struct net_device *dev); -void __netdev_watchdog_up(struct net_device *dev); +void netdev_watchdog_up(struct net_device *dev); void netif_carrier_on(struct net_device *dev); void netif_carrier_off(struct net_device *dev); @@ -4642,6 +4781,9 @@ int devm_register_netdev(struct device *dev, struct net_device *ndev); /* General hardware address lists handling functions */ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr_list *from_list, int addr_len); +int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len); void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr_list *from_list, int addr_len); int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 5897f3dbaf7c..f39f688d7285 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -357,7 +357,7 @@ extern struct static_key xt_tee_enabled; * Begin packet processing : all readers must wait the end * 1) Must be called with preemption disabled * 2) softirqs must be disabled too (or we should use this_cpu_add()) - * Returns : + * Returns: * 1 if no recursion on this cpu * 0 if recursion detected */ diff --git 
a/include/linux/netfilter_netdev.h b/include/linux/netfilter_netdev.h index 8676316547cc..3175073a66ba 100644 --- a/include/linux/netfilter_netdev.h +++ b/include/linux/netfilter_netdev.h @@ -66,7 +66,6 @@ static inline bool nf_hook_egress_active(void) * @rc: result code which shall be returned by __dev_queue_xmit() on failure * @dev: netdev whose egress hooks shall be applied to @skb * - * Returns @skb on success or %NULL if the packet was consumed or filtered. * Caller must hold rcu_read_lock. * * On ingress, packets are classified first by tc, then by netfilter. @@ -81,6 +80,8 @@ static inline bool nf_hook_egress_active(void) * called recursively by tunnel drivers such as vxlan, the flag is reverted to * false after sch_handle_egress(). This ensures that netfilter is applied * both on the overlay and underlying network. + * + * Returns: @skb on success or %NULL if the packet was consumed or filtered. */ static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc, struct net_device *dev) diff --git a/include/linux/netfs.h b/include/linux/netfs.h index ecdd5ced16a8..071d05d81d38 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -18,9 +18,11 @@ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/uio.h> +#include <linux/rolling_buffer.h> enum netfs_sreq_ref_trace; typedef struct mempool_s mempool_t; +struct folio_queue; /** * folio_start_private_2 - Start an fscache write on a folio. [DEPRECATED] @@ -71,6 +73,7 @@ struct netfs_inode { #define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */ #define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */ #define NETFS_ICTX_MODIFIED_ATTR 3 /* Indicate change in mtime/ctime */ +#define NETFS_ICTX_SINGLE_NO_UPLOAD 4 /* Monolithic payload, cache but no upload */ }; /* @@ -178,9 +181,6 @@ struct netfs_io_subrequest { unsigned long long start; /* Where to start the I/O */ size_t len; /* Size of the I/O */ size_t transferred; /* Amount of data transferred */ - size_t consumed; /* Amount of read data consumed */ - size_t prev_donated; /* Amount of data donated from previous subreq */ - size_t next_donated; /* Amount of data donated from next subreq */ refcount_t ref; short error; /* 0 or error that occurred */ unsigned short debug_index; /* Index in list (for debugging output) */ @@ -188,9 +188,6 @@ struct netfs_io_subrequest { u8 retry_count; /* The number of retries (0 on initial pass) */ enum netfs_io_source source; /* Where to read from/write to */ unsigned char stream_nr; /* I/O stream this belongs to */ - unsigned char curr_folioq_slot; /* Folio currently being read */ - unsigned char curr_folio_order; /* Order of folio */ - struct folio_queue *curr_folioq; /* Queue segment in which current folio resides */ unsigned long flags; #define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */ #define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */ @@ -208,9 +205,11 @@ enum netfs_io_origin { NETFS_READAHEAD, /* This read was triggered by readahead */ NETFS_READPAGE, /* This read is a synchronous read */ NETFS_READ_GAPS, /* This read is a synchronous read to fill gaps */ + NETFS_READ_SINGLE, /* This read should be treated as a single object */ NETFS_READ_FOR_WRITE, /* This read is to prepare a write */ NETFS_DIO_READ, /* This is a direct I/O read */ NETFS_WRITEBACK, /* This write was triggered by writepages */ + NETFS_WRITEBACK_SINGLE, /* This monolithic write was triggered by writepages */ NETFS_WRITETHROUGH, /* This write was made by 
netfs_perform_write() */ NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */ NETFS_DIO_WRITE, /* This is a direct I/O write */ @@ -231,16 +230,16 @@ struct netfs_io_request { struct address_space *mapping; /* The mapping being accessed */ struct kiocb *iocb; /* AIO completion vector */ struct netfs_cache_resources cache_resources; + struct netfs_io_request *copy_to_cache; /* Request to write just-read data to the cache */ struct readahead_control *ractl; /* Readahead descriptor */ struct list_head proc_link; /* Link in netfs_iorequests */ - struct list_head subrequests; /* Contributory I/O operations */ struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */ #define NR_IO_STREAMS 2 //wreq->nr_io_streams struct netfs_group *group; /* Writeback group being written back */ - struct folio_queue *buffer; /* Head of I/O buffer */ - struct folio_queue *buffer_tail; /* Tail of I/O buffer */ - struct iov_iter iter; /* Unencrypted-side iterator */ - struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */ + struct rolling_buffer buffer; /* Unencrypted buffer */ +#define NETFS_ROLLBUF_PUT_MARK ROLLBUF_MARK_1 +#define NETFS_ROLLBUF_PAGECACHE_MARK ROLLBUF_MARK_2 + wait_queue_head_t waitq; /* Processor waiter */ void *netfs_priv; /* Private data for the netfs */ void *netfs_priv2; /* Private data for the netfs */ struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */ @@ -251,28 +250,28 @@ struct netfs_io_request { atomic_t subreq_counter; /* Next subreq->debug_index */ unsigned int nr_group_rel; /* Number of refs to release on ->group */ spinlock_t lock; /* Lock for queuing subreqs */ - atomic_t nr_outstanding; /* Number of ops in progress */ unsigned long long submitted; /* Amount submitted for I/O so far */ unsigned long long len; /* Length of the request */ size_t transferred; /* Amount to be indicated as transferred */ long error; /* 0 or error that occurred */ enum netfs_io_origin origin; /* Origin of the request */ bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */ - u8 buffer_head_slot; /* First slot in ->buffer */ - u8 buffer_tail_slot; /* Next slot in ->buffer_tail */ unsigned long long i_size; /* Size of the file */ unsigned long long start; /* Start position */ atomic64_t issued_to; /* Write issuer folio cursor */ unsigned long long collected_to; /* Point we've collected to */ unsigned long long cleaned_to; /* Position we've cleaned folios to */ + unsigned long long abandon_to; /* Position to abandon folios to */ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */ - size_t prev_donated; /* Fallback for subreq->prev_donated */ + unsigned char front_folio_order; /* Order (size) of front folio */ refcount_t ref; unsigned long flags; +#define NETFS_RREQ_OFFLOAD_COLLECTION 0 /* Offload collection to workqueue */ #define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */ #define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */ #define NETFS_RREQ_FAILED 4 /* The request failed */ #define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */ +#define NETFS_RREQ_FOLIO_COPY_TO_CACHE 6 /* Copy current folio to cache from read */ #define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */ #define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */ #define NETFS_RREQ_BLOCKED 10 /* We blocked */ @@ -409,6 +408,13 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter * struct netfs_group *netfs_group); ssize_t 
netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from); +/* Single, monolithic object read/write API. */ +void netfs_single_mark_inode_dirty(struct inode *inode); +ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter); +int netfs_writeback_single(struct address_space *mapping, + struct writeback_control *wbc, + struct iov_iter *iter); + /* Address operations API */ struct readahead_control; void netfs_readahead(struct readahead_control *); @@ -428,10 +434,8 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp); vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group); /* (Sub)request management API. */ -void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq, - bool was_async); -void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq, - int error, bool was_async); +void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq); +void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq); void netfs_get_subrequest(struct netfs_io_subrequest *subreq, enum netfs_sreq_ref_trace what); void netfs_put_subrequest(struct netfs_io_subrequest *subreq, @@ -453,6 +457,18 @@ void netfs_end_io_write(struct inode *inode); int netfs_start_io_direct(struct inode *inode); void netfs_end_io_direct(struct inode *inode); +/* Miscellaneous APIs. */ +struct folio_queue *netfs_folioq_alloc(unsigned int rreq_id, gfp_t gfp, + unsigned int trace /*enum netfs_folioq_trace*/); +void netfs_folioq_free(struct folio_queue *folioq, + unsigned int trace /*enum netfs_trace_folioq*/); + +/* Buffer wrangling helpers API. */ +int netfs_alloc_folioq_buffer(struct address_space *mapping, + struct folio_queue **_buffer, + size_t *_cur_size, ssize_t size, gfp_t gfp); +void netfs_free_folioq_buffer(struct folio_queue *fq); + /** * netfs_inode - Get the netfs inode context from the inode * @inode: The inode to query diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index b34301650c47..f91e50a76efd 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -57,7 +57,7 @@ static inline void netpoll_poll_disable(struct net_device *dev) { return; } static inline void netpoll_poll_enable(struct net_device *dev) { return; } #endif -void netpoll_send_udp(struct netpoll *np, const char *msg, int len); +int netpoll_send_udp(struct netpoll *np, const char *msg, int len); void netpoll_print_options(struct netpoll *np); int netpoll_parse_options(struct netpoll *np, char *opt); int __netpoll_setup(struct netpoll *np, struct net_device *ndev); diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 8d7430d9f218..71fbebfa43c7 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -17,6 +17,7 @@ #include <linux/uidgid.h> #include <uapi/linux/nfs4.h> #include <linux/sunrpc/msg_prot.h> +#include <linux/sunrpc/xdrgen/nfs4_1.h> enum nfs4_acl_whotype { NFS4_ACL_WHO_NAMED = 0, @@ -365,7 +366,7 @@ enum limit_by4 { NFS4_LIMIT_BLOCKS = 2 }; -enum open_delegation_type4 { +enum nfs4_open_delegation_type4 { NFS4_OPEN_DELEGATE_NONE = 0, NFS4_OPEN_DELEGATE_READ = 1, NFS4_OPEN_DELEGATE_WRITE = 2, @@ -512,12 +513,6 @@ enum { FATTR4_XATTR_SUPPORT = 82, }; -enum { - FATTR4_TIME_DELEG_ACCESS = 84, - FATTR4_TIME_DELEG_MODIFY = 85, - FATTR4_OPEN_ARGUMENTS = 86, -}; - /* * The following internal definitions enable processing the above * attribute bits within 32-bit word boundaries. 
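A usage sketch for the single-object read API prototyped in the netfs.h hunk above (netfs_read_single()). Everything named example_* below is hypothetical and not taken from an in-tree caller, and the return convention is assumed to follow the usual ssize_t pattern (bytes copied on success, negative errno on error); only netfs_read_single() and iov_iter_kvec() come from the headers themselves.

#include <linux/netfs.h>
#include <linux/uio.h>

/*
 * Hypothetical caller: read a whole monolithic object (e.g. a small
 * xattr-like blob) into a kernel buffer via the single-object API.
 */
static ssize_t example_read_blob(struct inode *inode, struct file *file,
				 void *buf, size_t len)
{
	struct kvec kv = { .iov_base = buf, .iov_len = len };
	struct iov_iter iter;

	/* Describe the destination kernel buffer to the iterator layer. */
	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);

	/* Assumed: bytes copied on success, negative errno on failure. */
	return netfs_read_single(inode, file, &iter);
}

On the write side, netfs_writeback_single() takes the same iov_iter shape from the filesystem's writepages path.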
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 559273a0f16d..e74a87bb18a4 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1315,11 +1315,6 @@ struct nfs4_fsid_present_res { #endif /* CONFIG_NFS_V4 */ -struct nfstime4 { - u64 seconds; - u32 nseconds; -}; - #ifdef CONFIG_NFS_V4_1 struct pnfs_commit_bucket { diff --git a/include/linux/numa_memblks.h b/include/linux/numa_memblks.h index cfad6ce7e1bd..dd85613cdd86 100644 --- a/include/linux/numa_memblks.h +++ b/include/linux/numa_memblks.h @@ -29,7 +29,10 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi); int __init numa_memblks_init(int (*init_func)(void), bool memblock_force_top_down); +extern int numa_distance_cnt; + #ifdef CONFIG_NUMA_EMU +extern int emu_nid_to_phys[MAX_NUMNODES]; int numa_emu_cmdline(char *str); void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys, unsigned int nr_emu_nids); diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 13377dde4527..fe3b60818fdc 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -64,6 +64,7 @@ enum { /* Transport Type codes for Discovery Log Page entry TRTYPE field */ enum { + NVMF_TRTYPE_PCI = 0, /* PCI */ NVMF_TRTYPE_RDMA = 1, /* RDMA */ NVMF_TRTYPE_FC = 2, /* Fibre Channel */ NVMF_TRTYPE_TCP = 3, /* TCP/IP */ @@ -275,6 +276,7 @@ enum nvme_ctrl_attr { NVME_CTRL_ATTR_HID_128_BIT = (1 << 0), NVME_CTRL_ATTR_TBKAS = (1 << 6), NVME_CTRL_ATTR_ELBAS = (1 << 15), + NVME_CTRL_ATTR_RHII = (1 << 18), }; struct nvme_id_ctrl { @@ -1896,6 +1898,46 @@ static inline bool nvme_is_fabrics(const struct nvme_command *cmd) return cmd->common.opcode == nvme_fabrics_command; } +#ifdef CONFIG_NVME_VERBOSE_ERRORS +const char *nvme_get_error_status_str(u16 status); +const char *nvme_get_opcode_str(u8 opcode); +const char *nvme_get_admin_opcode_str(u8 opcode); +const char *nvme_get_fabrics_opcode_str(u8 opcode); +#else /* CONFIG_NVME_VERBOSE_ERRORS */ +static inline const char *nvme_get_error_status_str(u16 status) +{ + return "I/O Error"; +} +static inline const char *nvme_get_opcode_str(u8 opcode) +{ + return "I/O Cmd"; +} +static inline const char *nvme_get_admin_opcode_str(u8 opcode) +{ + return "Admin Cmd"; +} + +static inline const char *nvme_get_fabrics_opcode_str(u8 opcode) +{ + return "Fabrics Cmd"; +} +#endif /* CONFIG_NVME_VERBOSE_ERRORS */ + +static inline const char *nvme_opcode_str(int qid, u8 opcode) +{ + return qid ? nvme_get_opcode_str(opcode) : + nvme_get_admin_opcode_str(opcode); +} + +static inline const char *nvme_fabrics_opcode_str( + int qid, const struct nvme_command *cmd) +{ + if (nvme_is_fabrics(cmd)) + return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype); + + return nvme_opcode_str(qid, cmd->common.opcode); +} + struct nvme_error_slot { __le64 error_count; __le16 sqid; diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 3ebeaa0ded00..515676ebe598 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -92,8 +92,8 @@ struct nvmem_cell_info { * @read_only: Device is read-only. * @root_only: Device is accessibly to root only. * @of_node: If given, this will be used instead of the parent's of_node. - * @reg_read: Callback to read data. - * @reg_write: Callback to write data. + * @reg_read: Callback to read data; return zero if successful. + * @reg_write: Callback to write data; return zero if successful. * @size: Device size. * @word_size: Minimum read/write access granularity. * @stride: Minimum read/write access stride. 
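The reg_read contract clarified in the nvmem-provider.h hunk above ("return zero if successful") is worth a sketch, since returning a byte count is the common mistake. A minimal conforming callback follows; the example_chip type and its shadow buffer are hypothetical stand-ins for real device I/O, and only the callback signature comes from nvmem-provider.h.

#include <linux/errno.h>
#include <linux/nvmem-provider.h>
#include <linux/string.h>

/* Hypothetical driver state; the shadow buffer stands in for real I/O. */
struct example_chip {
	u8 shadow[256];
};

/*
 * Conforming reg_read callback: fill @val and return zero on success,
 * a negative errno on failure -- never the number of bytes read.
 */
static int example_nvmem_read(void *priv, unsigned int offset,
			      void *val, size_t bytes)
{
	struct example_chip *chip = priv;

	if (offset + bytes > sizeof(chip->shadow))
		return -EINVAL;

	memcpy(val, chip->shadow + offset, bytes);
	return 0;
}

The callback would be wired up through nvmem_config.reg_read, with nvmem_config.priv pointing at the driver state passed back as @priv.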
diff --git a/include/linux/objtool.h b/include/linux/objtool.h index b3b8d3dab52d..c722a921165b 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -45,29 +45,25 @@ #define STACK_FRAME_NON_STANDARD_FP(func) #endif -#define ANNOTATE_NOENDBR \ - "986: \n\t" \ - ".pushsection .discard.noendbr\n\t" \ - ".long 986b\n\t" \ - ".popsection\n\t" - #define ASM_REACHABLE \ "998:\n\t" \ ".pushsection .discard.reachable\n\t" \ ".long 998b\n\t" \ ".popsection\n\t" -#else /* __ASSEMBLY__ */ +#define __ASM_BREF(label) label ## b -/* - * This macro indicates that the following intra-function call is valid. - * Any non-annotated intra-function call will cause objtool to issue a warning. - */ -#define ANNOTATE_INTRA_FUNCTION_CALL \ - 999: \ - .pushsection .discard.intra_function_calls; \ - .long 999b; \ - .popsection; +#define __ASM_ANNOTATE(label, type) \ + ".pushsection .discard.annotate_insn,\"M\",@progbits,8\n\t" \ + ".long " __stringify(label) " - .\n\t" \ + ".long " __stringify(type) "\n\t" \ + ".popsection\n\t" + +#define ASM_ANNOTATE(type) \ + "911:\n\t" \ + __ASM_ANNOTATE(911b, type) + +#else /* __ASSEMBLY__ */ /* * In asm, there are two kinds of code: normal C-type callable functions and @@ -115,34 +111,11 @@ #endif .endm -.macro ANNOTATE_NOENDBR +.macro ANNOTATE type:req .Lhere_\@: - .pushsection .discard.noendbr - .long .Lhere_\@ - .popsection -.endm - -/* - * Use objtool to validate the entry requirement that all code paths do - * VALIDATE_UNRET_END before RET. - * - * NOTE: The macro must be used at the beginning of a global symbol, otherwise - * it will be ignored. - */ -.macro VALIDATE_UNRET_BEGIN -#if defined(CONFIG_NOINSTR_VALIDATION) && \ - (defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)) -.Lhere_\@: - .pushsection .discard.validate_unret + .pushsection .discard.annotate_insn,"M",@progbits,8 .long .Lhere_\@ - . - .popsection -#endif -.endm - -.macro REACHABLE -.Lhere_\@: - .pushsection .discard.reachable - .long .Lhere_\@ + .long \type .popsection .endm @@ -155,20 +128,77 @@ #define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t" #define STACK_FRAME_NON_STANDARD(func) #define STACK_FRAME_NON_STANDARD_FP(func) -#define ANNOTATE_NOENDBR -#define ASM_REACHABLE +#define __ASM_ANNOTATE(label, type) +#define ASM_ANNOTATE(type) #else -#define ANNOTATE_INTRA_FUNCTION_CALL .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 .endm .macro STACK_FRAME_NON_STANDARD func:req .endm -.macro ANNOTATE_NOENDBR -.endm -.macro REACHABLE +.macro ANNOTATE type:req .endm #endif #endif /* CONFIG_OBJTOOL */ +#ifndef __ASSEMBLY__ +/* + * Annotate away the various 'relocation to !ENDBR` complaints; knowing that + * these relocations will never be used for indirect calls. + */ +#define ANNOTATE_NOENDBR ASM_ANNOTATE(ANNOTYPE_NOENDBR) +/* + * This should be used immediately before an indirect jump/call. It tells + * objtool the subsequent indirect jump/call is vouched safe for retpoline + * builds. + */ +#define ANNOTATE_RETPOLINE_SAFE ASM_ANNOTATE(ANNOTYPE_RETPOLINE_SAFE) +/* + * See linux/instrumentation.h + */ +#define ANNOTATE_INSTR_BEGIN(label) __ASM_ANNOTATE(label, ANNOTYPE_INSTR_BEGIN) +#define ANNOTATE_INSTR_END(label) __ASM_ANNOTATE(label, ANNOTYPE_INSTR_END) +/* + * objtool annotation to ignore the alternatives and only consider the original + * instruction(s). + */ +#define ANNOTATE_IGNORE_ALTERNATIVE ASM_ANNOTATE(ANNOTYPE_IGNORE_ALTS) +/* + * This macro indicates that the following intra-function call is valid. 
+ * Any non-annotated intra-function call will cause objtool to issue a warning. + */ +#define ANNOTATE_INTRA_FUNCTION_CALL ASM_ANNOTATE(ANNOTYPE_INTRA_FUNCTION_CALL) +/* + * Use objtool to validate the entry requirement that all code paths do + * VALIDATE_UNRET_END before RET. + * + * NOTE: The macro must be used at the beginning of a global symbol, otherwise + * it will be ignored. + */ +#define ANNOTATE_UNRET_BEGIN ASM_ANNOTATE(ANNOTYPE_UNRET_BEGIN) +/* + * This should be used to refer to an instruction that is considered + * terminating, like a noreturn CALL or UD2 when we know they are not -- eg + * WARN using UD2. + */ +#define ANNOTATE_REACHABLE(label) __ASM_ANNOTATE(label, ANNOTYPE_REACHABLE) + +#else +#define ANNOTATE_NOENDBR ANNOTATE type=ANNOTYPE_NOENDBR +#define ANNOTATE_RETPOLINE_SAFE ANNOTATE type=ANNOTYPE_RETPOLINE_SAFE +/* ANNOTATE_INSTR_BEGIN ANNOTATE type=ANNOTYPE_INSTR_BEGIN */ +/* ANNOTATE_INSTR_END ANNOTATE type=ANNOTYPE_INSTR_END */ +#define ANNOTATE_IGNORE_ALTERNATIVE ANNOTATE type=ANNOTYPE_IGNORE_ALTS +#define ANNOTATE_INTRA_FUNCTION_CALL ANNOTATE type=ANNOTYPE_INTRA_FUNCTION_CALL +#define ANNOTATE_UNRET_BEGIN ANNOTATE type=ANNOTYPE_UNRET_BEGIN +#define ANNOTATE_REACHABLE ANNOTATE type=ANNOTYPE_REACHABLE +#endif + +#if defined(CONFIG_NOINSTR_VALIDATION) && \ + (defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)) +#define VALIDATE_UNRET_BEGIN ANNOTATE_UNRET_BEGIN +#else +#define VALIDATE_UNRET_BEGIN +#endif + #endif /* _LINUX_OBJTOOL_H */ diff --git a/include/linux/objtool_types.h b/include/linux/objtool_types.h index 453a4f4ef39d..df5d9fa84dba 100644 --- a/include/linux/objtool_types.h +++ b/include/linux/objtool_types.h @@ -54,4 +54,16 @@ struct unwind_hint { #define UNWIND_HINT_TYPE_SAVE 6 #define UNWIND_HINT_TYPE_RESTORE 7 +/* + * Annotate types + */ +#define ANNOTYPE_NOENDBR 1 +#define ANNOTYPE_RETPOLINE_SAFE 2 +#define ANNOTYPE_INSTR_BEGIN 3 +#define ANNOTYPE_INSTR_END 4 +#define ANNOTYPE_UNRET_BEGIN 5 +#define ANNOTYPE_IGNORE_ALTS 6 +#define ANNOTYPE_INTRA_FUNCTION_CALL 7 +#define ANNOTYPE_REACHABLE 8 + #endif /* _LINUX_OBJTOOL_TYPES_H */ diff --git a/include/linux/of.h b/include/linux/of.h index f921786cb8ac..eaf0e2a2b75c 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -311,6 +311,7 @@ extern struct device_node *of_find_node_with_property( extern struct property *of_find_property(const struct device_node *np, const char *name, int *lenp); +extern bool of_property_read_bool(const struct device_node *np, const char *propname); extern int of_property_count_elems_of_size(const struct device_node *np, const char *propname, int elem_size); extern int of_property_read_u32_index(const struct device_node *np, @@ -397,7 +398,6 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it, uint32_t *args, int size); -extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); extern int of_alias_get_id(const struct device_node *np, const char *stem); extern int of_alias_get_highest_id(const char *stem); @@ -615,6 +615,12 @@ static inline struct device_node *of_find_compatible_node( return NULL; } +static inline bool of_property_read_bool(const struct device_node *np, + const char *propname) +{ + return false; +} + static inline int of_property_count_elems_of_size(const struct device_node *np, const char *propname, int elem_size) { @@ -1243,24 +1249,6 @@ static inline int of_property_read_string_index(const struct device_node *np, } /** - * of_property_read_bool - Find a property - * @np: device node from which the 
property value is to be read. - * @propname: name of the property to be searched. - * - * Search for a boolean property in a device node. Usage on non-boolean - * property types is deprecated. - * - * Return: true if the property exists false otherwise. - */ -static inline bool of_property_read_bool(const struct device_node *np, - const char *propname) -{ - const struct property *prop = of_find_property(np, propname, NULL); - - return prop ? true : false; -} - -/** * of_property_present - Test if a property is present in a node * @np: device node to search for the property. * @propname: name of the property to be searched. @@ -1271,7 +1259,9 @@ static inline bool of_property_read_bool(const struct device_node *np, */ static inline bool of_property_present(const struct device_node *np, const char *propname) { - return of_property_read_bool(np, propname); + struct property *prop = of_find_property(np, propname, NULL); + + return prop ? true : false; } /** diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 9e034363788a..0cff90365391 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -26,6 +26,7 @@ struct of_pci_range { u64 bus_addr; }; u64 cpu_addr; + u64 parent_bus_addr; u64 size; u32 flags; }; diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index a2ff1ad48f7f..17471ef8e092 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -47,8 +47,6 @@ struct of_dev_auxdata { { .compatible = _compat, .phys_addr = _phys, .name = _name, \ .platform_data = _pdata } -extern const struct of_device_id of_default_bus_match_table[]; - /* Platform drivers register/unregister */ extern struct platform_device *of_device_alloc(struct device_node *np, const char *bus_id, diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h index c9e3843d2dd5..263b915df1fb 100644 --- a/include/linux/omap-gpmc.h +++ b/include/linux/omap-gpmc.h @@ -66,10 +66,6 @@ extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, struct device_node; -extern int gpmc_get_client_irq(unsigned irq_config); - -extern unsigned int gpmc_ticks_to_ns(unsigned int ticks); - extern void gpmc_cs_write_reg(int cs, int idx, u32 val); extern int gpmc_calc_divider(unsigned int sync_clk); extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t, diff --git a/include/linux/packing.h b/include/linux/packing.h index 5d36dcd06f60..0589d70bbe04 100644 --- a/include/linux/packing.h +++ b/include/linux/packing.h @@ -8,6 +8,83 @@ #include <linux/types.h> #include <linux/bitops.h> +#define GEN_PACKED_FIELD_STRUCT(__type) \ + struct packed_field_ ## __type { \ + __type startbit; \ + __type endbit; \ + __type offset; \ + __type size; \ + } + +/* struct packed_field_u8. Use with bit offsets < 256, buffers < 32B and + * unpacked structures < 256B. + */ +GEN_PACKED_FIELD_STRUCT(u8); + +/* struct packed_field_u16. Use with bit offsets < 65536, buffers < 8KB and + * unpacked structures < 64KB. 
+ */
+GEN_PACKED_FIELD_STRUCT(u16);
+
+#define PACKED_FIELD(start, end, struct_name, struct_field) \
+{ \
+ (start), \
+ (end), \
+ offsetof(struct_name, struct_field), \
+ sizeof_field(struct_name, struct_field), \
+}
+
+#define CHECK_PACKED_FIELD_OVERLAP(fields, index1, index2) ({ \
+ typeof(&(fields)[0]) __f = (fields); \
+ typeof(__f[0]) _f1 = __f[index1]; typeof(__f[0]) _f2 = __f[index2]; \
+ const bool _ascending = __f[0].startbit < __f[1].startbit; \
+ BUILD_BUG_ON_MSG(_ascending && _f1.startbit >= _f2.startbit, \
+ __stringify(fields) " field " __stringify(index2) \
+ " breaks ascending order"); \
+ BUILD_BUG_ON_MSG(!_ascending && _f1.startbit <= _f2.startbit, \
+ __stringify(fields) " field " __stringify(index2) \
+ " breaks descending order"); \
+ BUILD_BUG_ON_MSG(max(_f1.endbit, _f2.endbit) <= \
+ min(_f1.startbit, _f2.startbit), \
+ __stringify(fields) " field " __stringify(index2) \
+ " overlaps with previous field"); \
+})
+
+#define CHECK_PACKED_FIELD(fields, index) ({ \
+ typeof(&(fields)[0]) _f = (fields); \
+ typeof(_f[0]) __f = _f[index]; \
+ BUILD_BUG_ON_MSG(__f.startbit < __f.endbit, \
+ __stringify(fields) " field " __stringify(index) \
+ " start bit must not be smaller than end bit"); \
+ BUILD_BUG_ON_MSG(__f.size != 1 && __f.size != 2 && \
+ __f.size != 4 && __f.size != 8, \
+ __stringify(fields) " field " __stringify(index) \
+ " has unsupported unpacked storage size"); \
+ BUILD_BUG_ON_MSG(__f.startbit - __f.endbit >= BITS_PER_BYTE * __f.size, \
+ __stringify(fields) " field " __stringify(index) \
+ " exceeds unpacked storage size"); \
+ __builtin_choose_expr(index != 0, \
+ CHECK_PACKED_FIELD_OVERLAP(fields, index - 1, index), \
+ 1); \
+})
+
+/* Note that the packed fields may be either in ascending or descending order.
+ * Thus, we must check that both the first and last field fit within the
+ * packed buffer size.
+ */
+#define CHECK_PACKED_FIELDS_SIZE(fields, pbuflen) ({ \
+ typeof(&(fields)[0]) _f = (fields); \
+ typeof(pbuflen) _len = (pbuflen); \
+ const size_t num_fields = ARRAY_SIZE(fields); \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(_len), \
+ __stringify(fields) " pbuflen " __stringify(pbuflen) \
+ " must be a compile time constant"); \
+ BUILD_BUG_ON_MSG(_f[0].startbit >= BITS_PER_BYTE * _len, \
+ __stringify(fields) " first field exceeds packed buffer size"); \
+ BUILD_BUG_ON_MSG(_f[num_fields - 1].startbit >= BITS_PER_BYTE * _len, \
+ __stringify(fields) " last field exceeds packed buffer size"); \
+})
+
 #define QUIRK_MSB_ON_THE_RIGHT BIT(0)
 #define QUIRK_LITTLE_ENDIAN BIT(1)
 #define QUIRK_LSW32_IS_FIRST BIT(2)
@@ -26,4 +103,352 @@ int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen,
 int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit,
 size_t pbuflen, u8 quirks);
+void pack_fields_u8(void *pbuf, size_t pbuflen, const void *ustruct,
+ const struct packed_field_u8 *fields, size_t num_fields,
+ u8 quirks);
+
+void pack_fields_u16(void *pbuf, size_t pbuflen, const void *ustruct,
+ const struct packed_field_u16 *fields, size_t num_fields,
+ u8 quirks);
+
+void unpack_fields_u8(const void *pbuf, size_t pbuflen, void *ustruct,
+ const struct packed_field_u8 *fields, size_t num_fields,
+ u8 quirks);
+
+void unpack_fields_u16(const void *pbuf, size_t pbuflen, void *ustruct,
+ const struct packed_field_u16 *fields, size_t num_fields,
+ u8 quirks);
+
+/* Do not hand-edit the following packed field check macros!
+ * + * They are generated using scripts/gen_packed_field_checks.c, which may be + * built via "make scripts_gen_packed_field_checks". If larger macro sizes are + * needed in the future, please use this program to re-generate the macros and + * insert them here. + */ + +#define CHECK_PACKED_FIELDS_1(fields) \ + CHECK_PACKED_FIELD(fields, 0) + +#define CHECK_PACKED_FIELDS_2(fields) do { \ + CHECK_PACKED_FIELDS_1(fields); \ + CHECK_PACKED_FIELD(fields, 1); \ +} while (0) + +#define CHECK_PACKED_FIELDS_3(fields) do { \ + CHECK_PACKED_FIELDS_2(fields); \ + CHECK_PACKED_FIELD(fields, 2); \ +} while (0) + +#define CHECK_PACKED_FIELDS_4(fields) do { \ + CHECK_PACKED_FIELDS_3(fields); \ + CHECK_PACKED_FIELD(fields, 3); \ +} while (0) + +#define CHECK_PACKED_FIELDS_5(fields) do { \ + CHECK_PACKED_FIELDS_4(fields); \ + CHECK_PACKED_FIELD(fields, 4); \ +} while (0) + +#define CHECK_PACKED_FIELDS_6(fields) do { \ + CHECK_PACKED_FIELDS_5(fields); \ + CHECK_PACKED_FIELD(fields, 5); \ +} while (0) + +#define CHECK_PACKED_FIELDS_7(fields) do { \ + CHECK_PACKED_FIELDS_6(fields); \ + CHECK_PACKED_FIELD(fields, 6); \ +} while (0) + +#define CHECK_PACKED_FIELDS_8(fields) do { \ + CHECK_PACKED_FIELDS_7(fields); \ + CHECK_PACKED_FIELD(fields, 7); \ +} while (0) + +#define CHECK_PACKED_FIELDS_9(fields) do { \ + CHECK_PACKED_FIELDS_8(fields); \ + CHECK_PACKED_FIELD(fields, 8); \ +} while (0) + +#define CHECK_PACKED_FIELDS_10(fields) do { \ + CHECK_PACKED_FIELDS_9(fields); \ + CHECK_PACKED_FIELD(fields, 9); \ +} while (0) + +#define CHECK_PACKED_FIELDS_11(fields) do { \ + CHECK_PACKED_FIELDS_10(fields); \ + CHECK_PACKED_FIELD(fields, 10); \ +} while (0) + +#define CHECK_PACKED_FIELDS_12(fields) do { \ + CHECK_PACKED_FIELDS_11(fields); \ + CHECK_PACKED_FIELD(fields, 11); \ +} while (0) + +#define CHECK_PACKED_FIELDS_13(fields) do { \ + CHECK_PACKED_FIELDS_12(fields); \ + CHECK_PACKED_FIELD(fields, 12); \ +} while (0) + +#define CHECK_PACKED_FIELDS_14(fields) do { \ + CHECK_PACKED_FIELDS_13(fields); \ + CHECK_PACKED_FIELD(fields, 13); \ +} while (0) + +#define CHECK_PACKED_FIELDS_15(fields) do { \ + CHECK_PACKED_FIELDS_14(fields); \ + CHECK_PACKED_FIELD(fields, 14); \ +} while (0) + +#define CHECK_PACKED_FIELDS_16(fields) do { \ + CHECK_PACKED_FIELDS_15(fields); \ + CHECK_PACKED_FIELD(fields, 15); \ +} while (0) + +#define CHECK_PACKED_FIELDS_17(fields) do { \ + CHECK_PACKED_FIELDS_16(fields); \ + CHECK_PACKED_FIELD(fields, 16); \ +} while (0) + +#define CHECK_PACKED_FIELDS_18(fields) do { \ + CHECK_PACKED_FIELDS_17(fields); \ + CHECK_PACKED_FIELD(fields, 17); \ +} while (0) + +#define CHECK_PACKED_FIELDS_19(fields) do { \ + CHECK_PACKED_FIELDS_18(fields); \ + CHECK_PACKED_FIELD(fields, 18); \ +} while (0) + +#define CHECK_PACKED_FIELDS_20(fields) do { \ + CHECK_PACKED_FIELDS_19(fields); \ + CHECK_PACKED_FIELD(fields, 19); \ +} while (0) + +#define CHECK_PACKED_FIELDS_21(fields) do { \ + CHECK_PACKED_FIELDS_20(fields); \ + CHECK_PACKED_FIELD(fields, 20); \ +} while (0) + +#define CHECK_PACKED_FIELDS_22(fields) do { \ + CHECK_PACKED_FIELDS_21(fields); \ + CHECK_PACKED_FIELD(fields, 21); \ +} while (0) + +#define CHECK_PACKED_FIELDS_23(fields) do { \ + CHECK_PACKED_FIELDS_22(fields); \ + CHECK_PACKED_FIELD(fields, 22); \ +} while (0) + +#define CHECK_PACKED_FIELDS_24(fields) do { \ + CHECK_PACKED_FIELDS_23(fields); \ + CHECK_PACKED_FIELD(fields, 23); \ +} while (0) + +#define CHECK_PACKED_FIELDS_25(fields) do { \ + CHECK_PACKED_FIELDS_24(fields); \ + CHECK_PACKED_FIELD(fields, 24); \ +} while (0) + +#define 
CHECK_PACKED_FIELDS_26(fields) do { \ + CHECK_PACKED_FIELDS_25(fields); \ + CHECK_PACKED_FIELD(fields, 25); \ +} while (0) + +#define CHECK_PACKED_FIELDS_27(fields) do { \ + CHECK_PACKED_FIELDS_26(fields); \ + CHECK_PACKED_FIELD(fields, 26); \ +} while (0) + +#define CHECK_PACKED_FIELDS_28(fields) do { \ + CHECK_PACKED_FIELDS_27(fields); \ + CHECK_PACKED_FIELD(fields, 27); \ +} while (0) + +#define CHECK_PACKED_FIELDS_29(fields) do { \ + CHECK_PACKED_FIELDS_28(fields); \ + CHECK_PACKED_FIELD(fields, 28); \ +} while (0) + +#define CHECK_PACKED_FIELDS_30(fields) do { \ + CHECK_PACKED_FIELDS_29(fields); \ + CHECK_PACKED_FIELD(fields, 29); \ +} while (0) + +#define CHECK_PACKED_FIELDS_31(fields) do { \ + CHECK_PACKED_FIELDS_30(fields); \ + CHECK_PACKED_FIELD(fields, 30); \ +} while (0) + +#define CHECK_PACKED_FIELDS_32(fields) do { \ + CHECK_PACKED_FIELDS_31(fields); \ + CHECK_PACKED_FIELD(fields, 31); \ +} while (0) + +#define CHECK_PACKED_FIELDS_33(fields) do { \ + CHECK_PACKED_FIELDS_32(fields); \ + CHECK_PACKED_FIELD(fields, 32); \ +} while (0) + +#define CHECK_PACKED_FIELDS_34(fields) do { \ + CHECK_PACKED_FIELDS_33(fields); \ + CHECK_PACKED_FIELD(fields, 33); \ +} while (0) + +#define CHECK_PACKED_FIELDS_35(fields) do { \ + CHECK_PACKED_FIELDS_34(fields); \ + CHECK_PACKED_FIELD(fields, 34); \ +} while (0) + +#define CHECK_PACKED_FIELDS_36(fields) do { \ + CHECK_PACKED_FIELDS_35(fields); \ + CHECK_PACKED_FIELD(fields, 35); \ +} while (0) + +#define CHECK_PACKED_FIELDS_37(fields) do { \ + CHECK_PACKED_FIELDS_36(fields); \ + CHECK_PACKED_FIELD(fields, 36); \ +} while (0) + +#define CHECK_PACKED_FIELDS_38(fields) do { \ + CHECK_PACKED_FIELDS_37(fields); \ + CHECK_PACKED_FIELD(fields, 37); \ +} while (0) + +#define CHECK_PACKED_FIELDS_39(fields) do { \ + CHECK_PACKED_FIELDS_38(fields); \ + CHECK_PACKED_FIELD(fields, 38); \ +} while (0) + +#define CHECK_PACKED_FIELDS_40(fields) do { \ + CHECK_PACKED_FIELDS_39(fields); \ + CHECK_PACKED_FIELD(fields, 39); \ +} while (0) + +#define CHECK_PACKED_FIELDS_41(fields) do { \ + CHECK_PACKED_FIELDS_40(fields); \ + CHECK_PACKED_FIELD(fields, 40); \ +} while (0) + +#define CHECK_PACKED_FIELDS_42(fields) do { \ + CHECK_PACKED_FIELDS_41(fields); \ + CHECK_PACKED_FIELD(fields, 41); \ +} while (0) + +#define CHECK_PACKED_FIELDS_43(fields) do { \ + CHECK_PACKED_FIELDS_42(fields); \ + CHECK_PACKED_FIELD(fields, 42); \ +} while (0) + +#define CHECK_PACKED_FIELDS_44(fields) do { \ + CHECK_PACKED_FIELDS_43(fields); \ + CHECK_PACKED_FIELD(fields, 43); \ +} while (0) + +#define CHECK_PACKED_FIELDS_45(fields) do { \ + CHECK_PACKED_FIELDS_44(fields); \ + CHECK_PACKED_FIELD(fields, 44); \ +} while (0) + +#define CHECK_PACKED_FIELDS_46(fields) do { \ + CHECK_PACKED_FIELDS_45(fields); \ + CHECK_PACKED_FIELD(fields, 45); \ +} while (0) + +#define CHECK_PACKED_FIELDS_47(fields) do { \ + CHECK_PACKED_FIELDS_46(fields); \ + CHECK_PACKED_FIELD(fields, 46); \ +} while (0) + +#define CHECK_PACKED_FIELDS_48(fields) do { \ + CHECK_PACKED_FIELDS_47(fields); \ + CHECK_PACKED_FIELD(fields, 47); \ +} while (0) + +#define CHECK_PACKED_FIELDS_49(fields) do { \ + CHECK_PACKED_FIELDS_48(fields); \ + CHECK_PACKED_FIELD(fields, 48); \ +} while (0) + +#define CHECK_PACKED_FIELDS_50(fields) do { \ + CHECK_PACKED_FIELDS_49(fields); \ + CHECK_PACKED_FIELD(fields, 49); \ +} while (0) + +#define CHECK_PACKED_FIELDS(fields) \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 1, ({ CHECK_PACKED_FIELDS_1(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 2, ({ CHECK_PACKED_FIELDS_2(fields); 
}), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 3, ({ CHECK_PACKED_FIELDS_3(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 4, ({ CHECK_PACKED_FIELDS_4(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 5, ({ CHECK_PACKED_FIELDS_5(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 6, ({ CHECK_PACKED_FIELDS_6(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 7, ({ CHECK_PACKED_FIELDS_7(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 8, ({ CHECK_PACKED_FIELDS_8(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 9, ({ CHECK_PACKED_FIELDS_9(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 10, ({ CHECK_PACKED_FIELDS_10(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 11, ({ CHECK_PACKED_FIELDS_11(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 12, ({ CHECK_PACKED_FIELDS_12(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 13, ({ CHECK_PACKED_FIELDS_13(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 14, ({ CHECK_PACKED_FIELDS_14(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 15, ({ CHECK_PACKED_FIELDS_15(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 16, ({ CHECK_PACKED_FIELDS_16(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 17, ({ CHECK_PACKED_FIELDS_17(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 18, ({ CHECK_PACKED_FIELDS_18(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 19, ({ CHECK_PACKED_FIELDS_19(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 20, ({ CHECK_PACKED_FIELDS_20(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 21, ({ CHECK_PACKED_FIELDS_21(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 22, ({ CHECK_PACKED_FIELDS_22(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 23, ({ CHECK_PACKED_FIELDS_23(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 24, ({ CHECK_PACKED_FIELDS_24(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 25, ({ CHECK_PACKED_FIELDS_25(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 26, ({ CHECK_PACKED_FIELDS_26(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 27, ({ CHECK_PACKED_FIELDS_27(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 28, ({ CHECK_PACKED_FIELDS_28(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 29, ({ CHECK_PACKED_FIELDS_29(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 30, ({ CHECK_PACKED_FIELDS_30(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 31, ({ CHECK_PACKED_FIELDS_31(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 32, ({ CHECK_PACKED_FIELDS_32(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 33, ({ CHECK_PACKED_FIELDS_33(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 34, ({ CHECK_PACKED_FIELDS_34(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 35, ({ CHECK_PACKED_FIELDS_35(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 36, ({ CHECK_PACKED_FIELDS_36(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 37, ({ CHECK_PACKED_FIELDS_37(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 38, ({ CHECK_PACKED_FIELDS_38(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 39, ({ CHECK_PACKED_FIELDS_39(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 40, ({ CHECK_PACKED_FIELDS_40(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 41, ({ CHECK_PACKED_FIELDS_41(fields); }), \ + 
__builtin_choose_expr(ARRAY_SIZE(fields) == 42, ({ CHECK_PACKED_FIELDS_42(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 43, ({ CHECK_PACKED_FIELDS_43(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 44, ({ CHECK_PACKED_FIELDS_44(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 45, ({ CHECK_PACKED_FIELDS_45(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 46, ({ CHECK_PACKED_FIELDS_46(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 47, ({ CHECK_PACKED_FIELDS_47(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 48, ({ CHECK_PACKED_FIELDS_48(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 49, ({ CHECK_PACKED_FIELDS_49(fields); }), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 50, ({ CHECK_PACKED_FIELDS_50(fields); }), \ + ({ BUILD_BUG_ON_MSG(1, "CHECK_PACKED_FIELDS() must be regenerated to support array sizes larger than 50."); }) \ +)))))))))))))))))))))))))))))))))))))))))))))))))) + +/* End of generated content */ + +#define pack_fields(pbuf, pbuflen, ustruct, fields, quirks) \ + ({ \ + CHECK_PACKED_FIELDS(fields); \ + CHECK_PACKED_FIELDS_SIZE((fields), (pbuflen)); \ + _Generic((fields), \ + const struct packed_field_u8 * : pack_fields_u8, \ + const struct packed_field_u16 * : pack_fields_u16 \ + )((pbuf), (pbuflen), (ustruct), (fields), ARRAY_SIZE(fields), (quirks)); \ + }) + +#define unpack_fields(pbuf, pbuflen, ustruct, fields, quirks) \ + ({ \ + CHECK_PACKED_FIELDS(fields); \ + CHECK_PACKED_FIELDS_SIZE((fields), (pbuflen)); \ + _Generic((fields), \ + const struct packed_field_u8 * : unpack_fields_u8, \ + const struct packed_field_u16 * : unpack_fields_u16 \ + )((pbuf), (pbuflen), (ustruct), (fields), ARRAY_SIZE(fields), (quirks)); \ + }) + #endif diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 691506bdf2c5..36d283552f80 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -110,6 +110,7 @@ enum pageflags { PG_reclaim, /* To be reclaimed asap */ PG_swapbacked, /* Page is backed by RAM/swap */ PG_unevictable, /* Page is "unevictable" */ + PG_dropbehind, /* drop pages on IO completion */ #ifdef CONFIG_MMU PG_mlocked, /* Page is vma mlocked */ #endif @@ -562,6 +563,10 @@ PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL) FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE) FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE) +FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE) + FOLIO_TEST_CLEAR_FLAG(dropbehind, FOLIO_HEAD_PAGE) + __FOLIO_SET_FLAG(dropbehind, FOLIO_HEAD_PAGE) + #ifdef CONFIG_HIGHMEM /* * Must use a macro here due to header dependency issues. page_zone() is not @@ -894,21 +899,9 @@ static inline int PageTransCompound(const struct page *page) { return PageCompound(page); } - -/* - * PageTransTail returns true for both transparent huge pages - * and hugetlbfs pages, so it should only be called when it's known - * that hugetlbfs pages aren't involved. - */ -static inline int PageTransTail(const struct page *page) -{ - return PageTail(page); -} #else TESTPAGEFLAG_FALSE(TransHuge, transhuge) TESTPAGEFLAG_FALSE(TransCompound, transcompound) -TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap) -TESTPAGEFLAG_FALSE(TransTail, transtail) #endif #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE) @@ -918,11 +911,9 @@ TESTPAGEFLAG_FALSE(TransTail, transtail) * * This flag is set by hwpoison handler. Cleared by THP split or free page. 
*/ -PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND) - TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND) +FOLIO_FLAG(has_hwpoisoned, FOLIO_SECOND_PAGE) #else -PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned) - TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned) +FOLIO_FLAG_FALSE(has_hwpoisoned) #endif /* diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 73dc2c1841ec..898bb788243b 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -31,7 +31,7 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page, int migratetype); int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, - int migratetype, int flags, gfp_t gfp_flags); + int migratetype, int flags); void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, int migratetype); diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index 79dbd8bc35a7..46406f3fe34d 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -96,7 +96,7 @@ static inline void page_counter_reset_watermark(struct page_counter *counter) counter->watermark = usage; } -#ifdef CONFIG_MEMCG +#if IS_ENABLED(CONFIG_MEMCG) || IS_ENABLED(CONFIG_CGROUP_DMEM) void page_counter_calculate_protection(struct page_counter *root, struct page_counter *counter, bool recursive_protection); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index bcf0865a38ae..47bfc6b1b632 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -710,6 +710,7 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping, * * %FGP_NOFS - __GFP_FS will get cleared in gfp. * * %FGP_NOWAIT - Don't block on the folio lock. * * %FGP_STABLE - Wait for the folio to be stable (finished writeback) + * * %FGP_DONTCACHE - Uncached buffered IO * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin() * implementation. */ @@ -723,10 +724,21 @@ typedef unsigned int __bitwise fgf_t; #define FGP_NOWAIT ((__force fgf_t)0x00000020) #define FGP_FOR_MMAP ((__force fgf_t)0x00000040) #define FGP_STABLE ((__force fgf_t)0x00000080) +#define FGP_DONTCACHE ((__force fgf_t)0x00000100) #define FGF_GET_ORDER(fgf) (((__force unsigned)fgf) >> 26) /* top 6 bits */ #define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE) +static inline unsigned int filemap_get_order(size_t size) +{ + unsigned int shift = ilog2(size); + + if (shift <= PAGE_SHIFT) + return 0; + + return shift - PAGE_SHIFT; +} + /** * fgf_set_order - Encode a length in the fgf_t flags. * @size: The suggested size of the folio to create. @@ -740,11 +752,11 @@ typedef unsigned int __bitwise fgf_t; */ static inline fgf_t fgf_set_order(size_t size) { - unsigned int shift = ilog2(size); + unsigned int order = filemap_get_order(size); - if (shift <= PAGE_SHIFT) + if (!order) return 0; - return (__force fgf_t)((shift - PAGE_SHIFT) << 26); + return (__force fgf_t)(order << 26); } void *filemap_get_entry(struct address_space *mapping, pgoff_t index); @@ -1271,11 +1283,6 @@ void folio_wait_private_2(struct folio *folio); int folio_wait_private_2_killable(struct folio *folio); /* - * Add an arbitrary waiter to a page's wait queue - */ -void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter); - -/* * Fault in userspace address range. 
*/ size_t fault_in_writeable(char __user *uaddr, size_t size); @@ -1353,6 +1360,7 @@ struct readahead_control { pgoff_t _index; unsigned int _nr_pages; unsigned int _batch_count; + bool dropbehind; bool _workingset; unsigned long _pflags; }; diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 3a4860bd2758..3a10f8cfc3ad 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -45,6 +45,10 @@ struct pci_ecam_ops { unsigned int bus_shift; struct pci_ops pci_ops; int (*init)(struct pci_config_window *); + int (*enable_device)(struct pci_host_bridge *, + struct pci_dev *); + void (*disable_device)(struct pci_host_bridge *, + struct pci_dev *); }; /* diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 18a3aeb62ae4..ee6156bcbbd0 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -157,7 +157,7 @@ struct pci_epf { struct device dev; const char *name; struct pci_epf_header *header; - struct pci_epf_bar bar[6]; + struct pci_epf_bar bar[PCI_STD_NUM_BARS]; u8 msi_interrupts; u16 msix_interrupts; u8 func_no; @@ -174,7 +174,7 @@ struct pci_epf { /* Below members are to attach secondary EPC to an endpoint function */ struct pci_epc *sec_epc; struct list_head sec_epc_list; - struct pci_epf_bar sec_epc_bar[6]; + struct pci_epf_bar sec_epc_bar[PCI_STD_NUM_BARS]; u8 sec_epc_func_no; struct config_group *group; unsigned int is_bound; diff --git a/include/linux/pci.h b/include/linux/pci.h index db9b47ce3eef..47b31ad724fa 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -407,7 +407,7 @@ struct pci_dev { supported from root to here */ #endif unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */ - unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */ + unsigned int eetlp_prefix_max:3; /* Max # of End-End TLP Prefixes, 0=not supported */ pci_channel_state_t error_state; /* Current connectivity state */ struct device dev; /* Generic device interface */ @@ -595,6 +595,8 @@ struct pci_host_bridge { u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */ int (*map_irq)(const struct pci_dev *, u8, u8); void (*release_fn)(struct pci_host_bridge *); + int (*enable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev); + void (*disable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev); void *release_data; unsigned int ignore_reset_delay:1; /* For entire hierarchy */ unsigned int no_ext_tags:1; /* No Extended Tags */ @@ -1047,6 +1049,20 @@ struct pci_driver { .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 /** + * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form + * @vend: the vendor name + * @dev: the 16 bit PCI Device ID + * @subvend: the 16 bit PCI Subvendor ID + * @subdev: the 16 bit PCI Subdevice ID + * + * Generate the pci_device_id struct layout for the specific PCI + * device/subdevice. Private data may follow the output. 
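+ *
+ * For example, a driver match table distinguishing one subsystem variant
+ * might look like this (the device and subsystem IDs below are purely
+ * illustrative):
+ *
+ *   static const struct pci_device_id foo_ids[] = {
+ *           { PCI_VDEVICE_SUB(INTEL, 0x1234, 0x8086, 0x0001) },
+ *           { }
+ *   };
+ *   MODULE_DEVICE_TABLE(pci, foo_ids);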
+ */ +#define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \ + .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ + .subvendor = (subvend), .subdevice = (subdev), 0, 0 + +/** * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form * @vend: the vendor name (without PCI_VENDOR_ID_ prefix) * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix) @@ -2297,6 +2313,7 @@ static inline void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) { } #endif +int pcim_intx(struct pci_dev *pdev, int enabled); int pcim_request_all_regions(struct pci_dev *pdev, const char *name); void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar, diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index b5b5d17998b8..733f4ddd2ef1 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -50,7 +50,6 @@ struct dw_xpcs; struct phylink_pcs *xpcs_to_phylink_pcs(struct dw_xpcs *xpcs); int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface); -void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces); int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable); struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index cb99ec8c9e96..8333f132f4a9 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1279,6 +1279,11 @@ static inline void perf_sample_save_callchain(struct perf_sample_data *data, { int size = 1; + if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)) + return; + if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_CALLCHAIN)) + return; + data->callchain = perf_callchain(event, regs); size += data->callchain->nr; @@ -1287,12 +1292,18 @@ static inline void perf_sample_save_callchain(struct perf_sample_data *data, } static inline void perf_sample_save_raw_data(struct perf_sample_data *data, + struct perf_event *event, struct perf_raw_record *raw) { struct perf_raw_frag *frag = &raw->frag; u32 sum = 0; int size; + if (!(event->attr.sample_type & PERF_SAMPLE_RAW)) + return; + if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_RAW)) + return; + do { sum += frag->size; if (perf_raw_frag_last(frag)) @@ -1309,6 +1320,11 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data, data->sample_flags |= PERF_SAMPLE_RAW; } +static inline bool has_branch_stack(struct perf_event *event) +{ + return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; +} + static inline void perf_sample_save_brstack(struct perf_sample_data *data, struct perf_event *event, struct perf_branch_stack *brs, @@ -1316,6 +1332,11 @@ static inline void perf_sample_save_brstack(struct perf_sample_data *data, { int size = sizeof(u64); /* nr */ + if (!has_branch_stack(event)) + return; + if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_BRANCH_STACK)) + return; + if (branch_sample_hw_index(event)) size += sizeof(u64); size += brs->nr * sizeof(struct perf_branch_entry); @@ -1669,6 +1690,8 @@ static inline int perf_allow_tracepoint(struct perf_event_attr *attr) return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT); } +extern int perf_exclude_event(struct perf_event *event, struct pt_regs *regs); + extern void perf_event_init(void); extern void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, struct pt_regs *regs, @@ -1705,11 +1728,6 @@ static inline unsigned long 
perf_arch_guest_misc_flags(struct pt_regs *regs) # define perf_arch_guest_misc_flags(regs) perf_arch_guest_misc_flags(regs) #endif -static inline bool has_branch_stack(struct perf_event *event) -{ - return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; -} - static inline bool needs_branch_stack(struct perf_event *event) { return event->attr.branch_sample_type != 0; @@ -1879,6 +1897,10 @@ static inline u64 perf_event_pause(struct perf_event *event, bool reset) { return 0; } +static inline int perf_exclude_event(struct perf_event *event, struct pt_regs *regs) +{ + return 0; +} #endif #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index adef9d6e9b1b..94d267d02372 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -533,7 +533,14 @@ static inline void clear_young_dirty_ptes(struct vm_area_struct *vma, static inline void ptep_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - ptep_get_and_clear(mm, addr, ptep); + pte_t pte = ptep_get(ptep); + + pte_clear(mm, addr, ptep); + /* + * No need for ptep_get_and_clear(): page table check doesn't care about + * any bits that could have been set by HW concurrently. + */ + page_table_check_pte_clear(mm, pte); } #ifdef CONFIG_GUP_GET_PXX_LOW_HIGH diff --git a/include/linux/phy.h b/include/linux/phy.h index 563c46205685..19f076a71f94 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -32,19 +32,6 @@ #include <linux/atomic.h> #include <net/eee.h> -#define PHY_DEFAULT_FEATURES (SUPPORTED_Autoneg | \ - SUPPORTED_TP | \ - SUPPORTED_MII) - -#define PHY_10BT_FEATURES (SUPPORTED_10baseT_Half | \ - SUPPORTED_10baseT_Full) - -#define PHY_100BT_FEATURES (SUPPORTED_100baseT_Half | \ - SUPPORTED_100baseT_Full) - -#define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \ - SUPPORTED_1000baseT_Full) - extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1s_p2mp_features) __ro_after_init; @@ -62,16 +49,11 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init; #define PHY_BASIC_T1S_P2MP_FEATURES ((unsigned long *)&phy_basic_t1s_p2mp_features) #define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features) #define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) -#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) -#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features) -#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) #define PHY_EEE_CAP1_FEATURES ((unsigned long *)&phy_eee_cap1_features) #define PHY_EEE_CAP2_FEATURES ((unsigned long *)&phy_eee_cap2_features) extern const int phy_basic_ports_array[3]; -extern const int phy_fibre_port_array[1]; -extern const int phy_all_ports_features_array[7]; extern const int phy_10_100_features_array[4]; extern const int phy_basic_t1_features_array[3]; extern const int phy_basic_t1s_p2mp_features_array[2]; @@ -298,6 +280,29 @@ static inline const char *phy_modes(phy_interface_t interface) } } +/** + * rgmii_clock - map link speed to the clock rate + * @speed: link speed value + * + * Description: maps RGMII supported link speeds + * into the clock rates. 
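+ *
+ * A MAC driver would typically feed the result to clk_set_rate() when the
+ * link comes up, e.g. (sketch only; priv->tx_clk is a hypothetical clock
+ * reference):
+ *
+ *   long rate = rgmii_clock(speed);
+ *
+ *   if (rate > 0)
+ *           clk_set_rate(priv->tx_clk, rate);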
+ * + * Returns: clock rate or negative errno + */ +static inline long rgmii_clock(int speed) +{ + switch (speed) { + case SPEED_10: + return 2500000; + case SPEED_100: + return 25000000; + case SPEED_1000: + return 125000000; + default: + return -EINVAL; + } +} + #define PHY_INIT_TIMEOUT 100000 #define PHY_FORCE_TIMEOUT 10 @@ -818,6 +823,24 @@ struct phy_tdr_config { #define PHY_PAIR_ALL -1 /** + * enum link_inband_signalling - in-band signalling modes that are supported + * + * @LINK_INBAND_DISABLE: in-band signalling can be disabled + * @LINK_INBAND_ENABLE: in-band signalling can be enabled without bypass + * @LINK_INBAND_BYPASS: in-band signalling can be enabled with bypass + * + * The possible and required bits can only be used if the valid bit is set. + * If possible is clear, that means inband signalling can not be used. + * Required is only valid when possible is set, and means that inband + * signalling must be used. + */ +enum link_inband_signalling { + LINK_INBAND_DISABLE = BIT(0), + LINK_INBAND_ENABLE = BIT(1), + LINK_INBAND_BYPASS = BIT(2), +}; + +/** * struct phy_plca_cfg - Configuration of the PLCA (Physical Layer Collision * Avoidance) Reconciliation Sublayer. * @@ -957,6 +980,19 @@ struct phy_driver { int (*get_features)(struct phy_device *phydev); /** + * @inband_caps: query whether in-band is supported for the given PHY + * interface mode. Returns a bitmask of bits defined by enum + * link_inband_signalling. + */ + unsigned int (*inband_caps)(struct phy_device *phydev, + phy_interface_t interface); + + /** + * @config_inband: configure in-band mode for the PHY + */ + int (*config_inband)(struct phy_device *phydev, unsigned int modes); + + /** * @get_rate_matching: Get the supported type of rate matching for a * particular phy interface. This is used by phy consumers to determine * whether to advertise lower-speed modes for that interface. It is @@ -1090,6 +1126,53 @@ struct phy_driver { int (*cable_test_get_status)(struct phy_device *dev, bool *finished); /* Get statistics from the PHY using ethtool */ + /** + * @get_phy_stats: Retrieve PHY statistics. + * @dev: The PHY device for which the statistics are retrieved. + * @eth_stats: structure where Ethernet PHY stats will be stored. + * @stats: structure where additional PHY-specific stats will be stored. + * + * Retrieves the supported PHY statistics and populates the provided + * structures. The input structures are pre-initialized with + * `ETHTOOL_STAT_NOT_SET`, and the driver must only modify members + * corresponding to supported statistics. Unmodified members will remain + * set to `ETHTOOL_STAT_NOT_SET` and will not be returned to userspace. + */ + void (*get_phy_stats)(struct phy_device *dev, + struct ethtool_eth_phy_stats *eth_stats, + struct ethtool_phy_stats *stats); + + /** + * @get_link_stats: Retrieve link statistics. + * @dev: The PHY device for which the statistics are retrieved. + * @link_stats: structure where link-specific stats will be stored. + * + * Retrieves link-related statistics for the given PHY device. The input + * structure is pre-initialized with `ETHTOOL_STAT_NOT_SET`, and the + * driver must only modify members corresponding to supported + * statistics. Unmodified members will remain set to + * `ETHTOOL_STAT_NOT_SET` and will not be returned to userspace. + */ + void (*get_link_stats)(struct phy_device *dev, + struct ethtool_link_ext_stats *link_stats); + + /** + * @update_stats: Trigger periodic statistics updates. + * @dev: The PHY device for which statistics updates are triggered. 
+ * + * Periodically gathers statistics from the PHY device to update locally + * maintained 64-bit counters. This is necessary for PHYs that implement + * reduced-width counters (e.g., 16-bit or 32-bit) which can overflow + * more frequently compared to 64-bit counters. By invoking this + * callback, drivers can fetch the current counter values, handle + * overflow detection, and accumulate the results into local 64-bit + * counters for accurate reporting through the `get_phy_stats` and + * `get_link_stats` interfaces. + * + * Return: 0 on success or a negative error code on failure. + */ + int (*update_stats)(struct phy_device *dev); + /** @get_sset_count: Number of statistic counters */ int (*get_sset_count)(struct phy_device *dev); /** @get_strings: Names of the statistic counters */ @@ -1580,6 +1663,9 @@ static inline bool phy_polling_mode(struct phy_device *phydev) if (phydev->drv->flags & PHY_POLL_CABLE_TEST) return true; + if (phydev->drv->update_stats) + return true; + return phydev->irq == PHY_POLL; } @@ -1818,6 +1904,9 @@ int phy_config_aneg(struct phy_device *phydev); int _phy_start_aneg(struct phy_device *phydev); int phy_start_aneg(struct phy_device *phydev); int phy_aneg_done(struct phy_device *phydev); +unsigned int phy_inband_caps(struct phy_device *phydev, + phy_interface_t interface); +int phy_config_inband(struct phy_device *phydev, unsigned int modes); int phy_speed_down(struct phy_device *phydev, bool sync); int phy_speed_up(struct phy_device *phydev); bool phy_check_valid(int speed, int duplex, unsigned long *features); @@ -1957,7 +2046,7 @@ int genphy_c45_plca_set_cfg(struct phy_device *phydev, int genphy_c45_plca_get_status(struct phy_device *phydev, struct phy_plca_status *plca_st); int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv, - unsigned long *lp, bool *is_enabled); + unsigned long *lp); int genphy_c45_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data); int genphy_c45_ethtool_set_eee(struct phy_device *phydev, @@ -2014,6 +2103,7 @@ void phy_advertise_eee_all(struct phy_device *phydev); void phy_support_sym_pause(struct phy_device *phydev); void phy_support_asym_pause(struct phy_device *phydev); void phy_support_eee(struct phy_device *phydev); +void phy_disable_eee(struct phy_device *phydev); void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx, bool autoneg); void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx); @@ -2038,6 +2128,8 @@ int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask); int phy_unregister_fixup_for_id(const char *bus_id); int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask); +int phy_eee_tx_clock_stop_capable(struct phy_device *phydev); +int phy_eee_rx_clock_stop(struct phy_device *phydev, bool clk_stop_enable); int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable); int phy_get_eee_err(struct phy_device *phydev); int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data); @@ -2065,6 +2157,13 @@ int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data); int phy_ethtool_get_sset_count(struct phy_device *phydev); int phy_ethtool_get_stats(struct phy_device *phydev, struct ethtool_stats *stats, u64 *data); + +void __phy_ethtool_get_phy_stats(struct phy_device *phydev, + struct ethtool_eth_phy_stats *phy_stats, + struct ethtool_phy_stats *phydev_stats); +void __phy_ethtool_get_link_ext_stats(struct phy_device *phydev, + struct ethtool_link_ext_stats *link_stats); + int phy_ethtool_get_plca_cfg(struct 
phy_device *phydev, struct phy_plca_cfg *plca_cfg); int phy_ethtool_set_plca_cfg(struct phy_device *phydev, diff --git a/include/linux/phylib_stubs.h b/include/linux/phylib_stubs.h index 1279f48c8a70..9d2d6090c86d 100644 --- a/include/linux/phylib_stubs.h +++ b/include/linux/phylib_stubs.h @@ -5,6 +5,9 @@ #include <linux/rtnetlink.h> +struct ethtool_eth_phy_stats; +struct ethtool_link_ext_stats; +struct ethtool_phy_stats; struct kernel_hwtstamp_config; struct netlink_ext_ack; struct phy_device; @@ -19,6 +22,11 @@ struct phylib_stubs { int (*hwtstamp_set)(struct phy_device *phydev, struct kernel_hwtstamp_config *config, struct netlink_ext_ack *extack); + void (*get_phy_stats)(struct phy_device *phydev, + struct ethtool_eth_phy_stats *phy_stats, + struct ethtool_phy_stats *phydev_stats); + void (*get_link_ext_stats)(struct phy_device *phydev, + struct ethtool_link_ext_stats *link_stats); }; static inline int phy_hwtstamp_get(struct phy_device *phydev, @@ -50,6 +58,29 @@ static inline int phy_hwtstamp_set(struct phy_device *phydev, return phylib_stubs->hwtstamp_set(phydev, config, extack); } +static inline void phy_ethtool_get_phy_stats(struct phy_device *phydev, + struct ethtool_eth_phy_stats *phy_stats, + struct ethtool_phy_stats *phydev_stats) +{ + ASSERT_RTNL(); + + if (!phylib_stubs) + return; + + phylib_stubs->get_phy_stats(phydev, phy_stats, phydev_stats); +} + +static inline void phy_ethtool_get_link_ext_stats(struct phy_device *phydev, + struct ethtool_link_ext_stats *link_stats) +{ + ASSERT_RTNL(); + + if (!phylib_stubs) + return; + + phylib_stubs->get_link_ext_stats(phydev, link_stats); +} + #else static inline int phy_hwtstamp_get(struct phy_device *phydev, @@ -65,4 +96,15 @@ static inline int phy_hwtstamp_set(struct phy_device *phydev, return -EOPNOTSUPP; } +static inline void phy_ethtool_get_phy_stats(struct phy_device *phydev, + struct ethtool_eth_phy_stats *phy_stats, + struct ethtool_phy_stats *phydev_stats) +{ +} + +static inline void phy_ethtool_get_link_ext_stats(struct phy_device *phydev, + struct ethtool_link_ext_stats *link_stats) +{ +} + #endif diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 5c01048860c4..898b00451bbf 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -5,6 +5,8 @@ #include <linux/spinlock.h> #include <linux/workqueue.h> +#include <net/eee.h> + struct device_node; struct ethtool_cmd; struct fwnode_handle; @@ -143,11 +145,17 @@ enum phylink_op_type { * possible and avoid stopping it during suspend events. * @default_an_inband: if true, defaults to MLO_AN_INBAND rather than * MLO_AN_PHY. A fixed-link specification will override. + * @eee_rx_clk_stop_enable: if true, PHY can stop the receive clock during LPI * @get_fixed_state: callback to execute to determine the fixed link state, * if MAC link is at %MLO_AN_FIXED mode. * @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx * are supported by the MAC/PCS. + * @lpi_interfaces: bitmap describing which PHY interface modes can support + * LPI signalling. * @mac_capabilities: MAC pause/speed/duplex capabilities. + * @lpi_capabilities: MAC speeds which can support LPI signalling + * @lpi_timer_default: Default EEE LPI timer setting. 
+ * @eee_enabled_default: If set, EEE will be enabled by phylink at creation time */ struct phylink_config { struct device *dev; @@ -156,10 +164,15 @@ struct phylink_config { bool mac_managed_pm; bool mac_requires_rxc; bool default_an_inband; + bool eee_rx_clk_stop_enable; void (*get_fixed_state)(struct phylink_config *config, struct phylink_link_state *state); DECLARE_PHY_INTERFACE_MASK(supported_interfaces); + DECLARE_PHY_INTERFACE_MASK(lpi_interfaces); unsigned long mac_capabilities; + unsigned long lpi_capabilities; + u32 lpi_timer_default; + bool eee_enabled_default; }; void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed); @@ -173,6 +186,8 @@ void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed); * @mac_finish: finish a major reconfiguration of the interface. * @mac_link_down: take the link down. * @mac_link_up: allow the link to come up. + * @mac_disable_tx_lpi: disable LPI. + * @mac_enable_tx_lpi: enable and configure LPI. * * The individual methods are described more fully below. */ @@ -193,6 +208,9 @@ struct phylink_mac_ops { struct phy_device *phy, unsigned int mode, phy_interface_t interface, int speed, int duplex, bool tx_pause, bool rx_pause); + void (*mac_disable_tx_lpi)(struct phylink_config *config); + int (*mac_enable_tx_lpi)(struct phylink_config *config, u32 timer, + bool tx_clk_stop); }; #if 0 /* For kernel-doc purposes only. */ @@ -387,12 +405,41 @@ void mac_link_down(struct phylink_config *config, unsigned int mode, void mac_link_up(struct phylink_config *config, struct phy_device *phy, unsigned int mode, phy_interface_t interface, int speed, int duplex, bool tx_pause, bool rx_pause); + +/** + * mac_disable_tx_lpi() - disable LPI generation at the MAC + * @config: a pointer to a &struct phylink_config. + * + * Disable generation of LPI at the MAC, effectively preventing the MAC + * from indicating that it is idle. + */ +void mac_disable_tx_lpi(struct phylink_config *config); + +/** + * mac_enable_tx_lpi() - configure and enable LPI generation at the MAC + * @config: a pointer to a &struct phylink_config. + * @timer: LPI timeout in microseconds. + * @tx_clk_stop: allow xMII transmit clock to be stopped during LPI + * + * Configure the LPI timeout accordingly. This will only be called when + * the link is already up, to cater for situations where the hardware + * needs to be programmed according to the link speed. + * + * Enable LPI generation at the MAC, and configure whether the xMII transmit + * clock may be stopped. + * + * Returns: 0 on success. Please consult with rmk before returning an error. + */ +int mac_enable_tx_lpi(struct phylink_config *config, u32 timer, + bool tx_clk_stop); #endif struct phylink_pcs_ops; /** * struct phylink_pcs - PHYLINK PCS instance + * @supported_interfaces: describing which PHY_INTERFACE_MODE_xxx + * are supported by this PCS. * @ops: a pointer to the &struct phylink_pcs_ops structure * @phylink: pointer to &struct phylink_config * @neg_mode: provide PCS neg mode via "mode" argument @@ -409,6 +456,7 @@ struct phylink_pcs_ops; * the PCS driver. */ struct phylink_pcs { + DECLARE_PHY_INTERFACE_MASK(supported_interfaces); const struct phylink_pcs_ops *ops; struct phylink *phylink; bool neg_mode; @@ -419,6 +467,7 @@ struct phylink_pcs { /** * struct phylink_pcs_ops - MAC PCS operations structure. * @pcs_validate: validate the link configuration. + * @pcs_inband_caps: query inband support for interface mode. * @pcs_enable: enable the PCS. * @pcs_disable: disable the PCS. 
* @pcs_pre_config: pre-mac_config method (for errata) @@ -434,13 +483,15 @@ struct phylink_pcs { struct phylink_pcs_ops { int (*pcs_validate)(struct phylink_pcs *pcs, unsigned long *supported, const struct phylink_link_state *state); + unsigned int (*pcs_inband_caps)(struct phylink_pcs *pcs, + phy_interface_t interface); int (*pcs_enable)(struct phylink_pcs *pcs); void (*pcs_disable)(struct phylink_pcs *pcs); void (*pcs_pre_config)(struct phylink_pcs *pcs, phy_interface_t interface); int (*pcs_post_config)(struct phylink_pcs *pcs, phy_interface_t interface); - void (*pcs_get_state)(struct phylink_pcs *pcs, + void (*pcs_get_state)(struct phylink_pcs *pcs, unsigned int neg_mode, struct phylink_link_state *state); int (*pcs_config)(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, @@ -471,6 +522,20 @@ int pcs_validate(struct phylink_pcs *pcs, unsigned long *supported, const struct phylink_link_state *state); /** + * pcs_inband_caps - query PCS in-band capabilities for interface mode. + * @pcs: a pointer to a &struct phylink_pcs. + * @interface: interface mode to be queried + * + * Returns zero if it is unknown what in-band signalling is supported by the + * PHY (e.g. because the PHY driver doesn't implement the method.) Otherwise, + * returns a bit mask of the LINK_INBAND_* values from + * &enum link_inband_signalling to describe which inband modes are supported + * for this interface mode. + */ +unsigned int pcs_inband_caps(struct phylink_pcs *pcs, + phy_interface_t interface); + +/** * pcs_enable() - enable the PCS. * @pcs: a pointer to a &struct phylink_pcs. */ @@ -485,6 +550,7 @@ void pcs_disable(struct phylink_pcs *pcs); /** * pcs_get_state() - Read the current inband link state from the hardware * @pcs: a pointer to a &struct phylink_pcs. + * @neg_mode: link negotiation mode (PHYLINK_PCS_NEG_xxx) * @state: a pointer to a &struct phylink_link_state. * * Read the current inband link state from the MAC PCS, reporting the @@ -493,8 +559,11 @@ void pcs_disable(struct phylink_pcs *pcs); * negotiation completion state in @state->an_complete, and link up state * in @state->link. If possible, @state->lp_advertising should also be * populated. + * + * Note that the @neg_mode parameter is always the PHYLINK_PCS_NEG_xxx + * state, not MLO_AN_xxx. 
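+ *
+ * A clause 22 PCS backed by an mdio_device may simply forward both
+ * arguments to the helper declared below (sketch only; the mdio member is
+ * driver specific):
+ *
+ *   phylink_mii_c22_pcs_get_state(priv->mdio, neg_mode, state);
+ *
+ * which is expected to decode the in-band advertisement only when
+ * @neg_mode is PHYLINK_PCS_NEG_INBAND_ENABLED.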
*/ -void pcs_get_state(struct phylink_pcs *pcs, +void pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, struct phylink_link_state *state); /** @@ -669,8 +738,9 @@ static inline int phylink_get_link_timer_ns(phy_interface_t interface) } void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state, - u16 bmsr, u16 lpa); + unsigned int neg_mode, u16 bmsr, u16 lpa); void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs, + unsigned int neg_mode, struct phylink_link_state *state); int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface, const unsigned long *advertising); diff --git a/include/linux/pid.h b/include/linux/pid.h index a3aad9b4074c..98837a1ff0f3 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -59,6 +59,7 @@ struct pid spinlock_t lock; struct dentry *stashed; u64 ino; + struct rb_node pidfs_node; /* lists of tasks that use this pid */ struct hlist_head tasks[PIDTYPE_MAX]; struct hlist_head inodes; @@ -68,6 +69,7 @@ struct pid struct upid numbers[]; }; +extern seqcount_spinlock_t pidmap_lock_seq; extern struct pid init_struct_pid; struct file; @@ -106,9 +108,6 @@ extern void exchange_tids(struct task_struct *task, struct task_struct *old); extern void transfer_pid(struct task_struct *old, struct task_struct *new, enum pid_type); -extern int pid_max; -extern int pid_max_min, pid_max_max; - /* * look up a PID in the hash table. Must be called with the tasklist_lock * or rcu_read_lock() held. diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index f9f9931e02d6..7c67a5811199 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -30,6 +30,7 @@ struct pid_namespace { struct task_struct *child_reaper; struct kmem_cache *pid_cachep; unsigned int level; + int pid_max; struct pid_namespace *parent; #ifdef CONFIG_BSD_PROCESS_ACCT struct fs_pin *bacct; @@ -38,9 +39,14 @@ struct pid_namespace { struct ucounts *ucounts; int reboot; /* group exit code if this pidns was rebooted */ struct ns_common ns; -#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE) + struct work_struct work; +#ifdef CONFIG_SYSCTL + struct ctl_table_set set; + struct ctl_table_header *sysctls; +#if defined(CONFIG_MEMFD_CREATE) int memfd_noexec_scope; #endif +#endif } __randomize_layout; extern struct pid_namespace init_pid_ns; @@ -117,6 +123,8 @@ static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd) extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk); void pidhash_init(void); void pid_idr_init(void); +int register_pidns_sysctls(struct pid_namespace *pidns); +void unregister_pidns_sysctls(struct pid_namespace *pidns); static inline bool task_is_in_init_pid_ns(struct task_struct *tsk) { diff --git a/include/linux/pidfs.h b/include/linux/pidfs.h index 75bdf9807802..7c830d0dec9a 100644 --- a/include/linux/pidfs.h +++ b/include/linux/pidfs.h @@ -4,5 +4,8 @@ struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags); void __init pidfs_init(void); +void pidfs_add_pid(struct pid *pid); +void pidfs_remove_pid(struct pid *pid); +extern const struct dentry_operations pidfs_dentry_operations; #endif /* _LINUX_PID_FS_H */ diff --git a/include/linux/platform_data/clk-davinci-pll.h b/include/linux/platform_data/clk-davinci-pll.h deleted file mode 100644 index e55dab1d578b..000000000000 --- a/include/linux/platform_data/clk-davinci-pll.h +++ /dev/null @@ -1,21 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PLL clock driver for TI Davinci SoCs - * - * Copyright (C) 2018 David 
Lechner <david@lechnology.com> - */ - -#ifndef __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ -#define __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ - -#include <linux/regmap.h> - -/** - * davinci_pll_platform_data - * @cfgchip: CFGCHIP syscon regmap - */ -struct davinci_pll_platform_data { - struct regmap *cfgchip; -}; - -#endif /* __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ */ diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index b3c4993e656e..ecf290a0c98f 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -5044,8 +5044,11 @@ struct ec_response_pd_status { #define PD_EVENT_POWER_CHANGE BIT(1) #define PD_EVENT_IDENTITY_RECEIVED BIT(2) #define PD_EVENT_DATA_SWAP BIT(3) +#define PD_EVENT_TYPEC BIT(4) +#define PD_EVENT_PPM BIT(5) + struct ec_response_host_event_status { - uint32_t status; /* PD MCU host event status */ + uint32_t status; /* PD MCU host event status */ } __ec_align4; /* Set USB type-C port role and muxes */ @@ -6105,6 +6108,29 @@ struct ec_response_typec_vdm_response { #undef VDO_MAX_SIZE +/* + * UCSI OPM-PPM commands + * + * These commands are used for communication between OPM and PPM. + * Only UCSI3.0 is tested. + */ + +#define EC_CMD_UCSI_PPM_SET 0x0140 + +/* The data size is stored in the host command protocol header. */ +struct ec_params_ucsi_ppm_set { + uint16_t offset; + uint8_t data[]; +} __ec_align2; + +#define EC_CMD_UCSI_PPM_GET 0x0141 + +/* For 'GET' sub-commands, data will be returned as a raw payload. */ +struct ec_params_ucsi_ppm_get { + uint16_t offset; + uint8_t size; +} __ec_align2; + /*****************************************************************************/ /* The command range 0x200-0x2FF is reserved for Rotor. */ diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h index b34ed0cc1f8d..3ec24f445c29 100644 --- a/include/linux/platform_data/cros_ec_proto.h +++ b/include/linux/platform_data/cros_ec_proto.h @@ -42,6 +42,11 @@ #define EC_MAX_RESPONSE_OVERHEAD 32 /* + * ACPI notify value for MKBP host event. + */ +#define ACPI_NOTIFY_CROS_EC_MKBP 0x80 + +/* * EC panic is not covered by the standard (0-F) ACPI notify values. * Arbitrarily choosing B0 to notify ec panic, which is in the 84-BF * device specific ACPI notify range. @@ -246,6 +251,8 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); +int cros_ec_rwsig_continue(struct cros_ec_device *ec_dev); + int cros_ec_query_all(struct cros_ec_device *ec_dev); int cros_ec_get_next_event(struct cros_ec_device *ec_dev, diff --git a/include/linux/platform_data/i2c-davinci.h b/include/linux/platform_data/i2c-davinci.h deleted file mode 100644 index 98967df07468..000000000000 --- a/include/linux/platform_data/i2c-davinci.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * DaVinci I2C controller platform_device info - * - * Author: Vladimir Barinov, MontaVista Software, Inc. <source@mvista.com> - * - * 2007 (c) MontaVista Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. 
-*/ - -#ifndef __ASM_ARCH_I2C_H -#define __ASM_ARCH_I2C_H - -/* All frequencies are expressed in kHz */ -struct davinci_i2c_platform_data { - unsigned int bus_freq; /* standard bus frequency (kHz) */ - unsigned int bus_delay; /* post-transaction delay (usec) */ - bool gpio_recovery; /* Use GPIO recovery method */ - bool has_pfunc; /* Chip has a ICPFUNC register */ -}; - -/* for board setup code */ -void davinci_init_i2c(struct davinci_i2c_platform_data *); - -#endif /* __ASM_ARCH_I2C_H */ diff --git a/include/linux/platform_data/keyscan-davinci.h b/include/linux/platform_data/keyscan-davinci.h deleted file mode 100644 index 260d596ba0af..000000000000 --- a/include/linux/platform_data/keyscan-davinci.h +++ /dev/null @@ -1,29 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2009 Texas Instruments, Inc - * - * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com> - */ - -#ifndef DAVINCI_KEYSCAN_H -#define DAVINCI_KEYSCAN_H - -#include <linux/io.h> - -enum davinci_matrix_types { - DAVINCI_KEYSCAN_MATRIX_4X4, - DAVINCI_KEYSCAN_MATRIX_5X3, -}; - -struct davinci_ks_platform_data { - int (*device_enable)(struct device *dev); - unsigned short *keymap; - u32 keymapsize; - u8 rep:1; - u8 strobe; - u8 interval; - u8 matrix_type; -}; - -#endif - diff --git a/include/linux/platform_data/syscon.h b/include/linux/platform_data/syscon.h deleted file mode 100644 index 2c089dd3e2bd..000000000000 --- a/include/linux/platform_data/syscon.h +++ /dev/null @@ -1,9 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef PLATFORM_DATA_SYSCON_H -#define PLATFORM_DATA_SYSCON_H - -struct syscon_platform_data { - const char *label; -}; - -#endif diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index 365e119bebaa..783e2a336861 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -186,6 +186,11 @@ static const struct dmi_system_id asus_use_hid_led_dmi_ids[] = { }, { .matches = { + DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt P16"), + }, + }, + { + .matches = { DMI_MATCH(DMI_BOARD_NAME, "GA403U"), }, }, diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h index f5492ed413f3..8ab5b0e8eb2c 100644 --- a/include/linux/platform_profile.h +++ b/include/linux/platform_profile.h @@ -9,6 +9,7 @@ #ifndef _PLATFORM_PROFILE_H_ #define _PLATFORM_PROFILE_H_ +#include <linux/device.h> #include <linux/bitops.h> /* @@ -23,20 +24,34 @@ enum platform_profile_option { PLATFORM_PROFILE_BALANCED, PLATFORM_PROFILE_BALANCED_PERFORMANCE, PLATFORM_PROFILE_PERFORMANCE, + PLATFORM_PROFILE_CUSTOM, PLATFORM_PROFILE_LAST, /*must always be last */ }; -struct platform_profile_handler { - unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)]; - int (*profile_get)(struct platform_profile_handler *pprof, - enum platform_profile_option *profile); - int (*profile_set)(struct platform_profile_handler *pprof, - enum platform_profile_option profile); +/** + * struct platform_profile_ops - platform profile operations + * @probe: Callback to setup choices available to the new class device. These + * choices will only be enforced when setting a new profile, not when + * getting the current one. + * @profile_get: Callback that will be called when showing the current platform + * profile in sysfs. + * @profile_set: Callback that will be called when storing a new platform + * profile in sysfs. 
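+ *
+ * A typical driver defines the ops once and registers them from probe,
+ * e.g. (all foo_* names are hypothetical):
+ *
+ *   static const struct platform_profile_ops foo_profile_ops = {
+ *           .probe = foo_profile_probe,
+ *           .profile_get = foo_profile_get,
+ *           .profile_set = foo_profile_set,
+ *   };
+ *
+ *   ppdev = devm_platform_profile_register(dev, "foo", drvdata,
+ *                                          &foo_profile_ops);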
+ */ +struct platform_profile_ops { + int (*probe)(void *drvdata, unsigned long *choices); + int (*profile_get)(struct device *dev, enum platform_profile_option *profile); + int (*profile_set)(struct device *dev, enum platform_profile_option profile); }; -int platform_profile_register(struct platform_profile_handler *pprof); -int platform_profile_remove(void); +struct device *platform_profile_register(struct device *dev, const char *name, + void *drvdata, + const struct platform_profile_ops *ops); +int platform_profile_remove(struct device *dev); +struct device *devm_platform_profile_register(struct device *dev, const char *name, + void *drvdata, + const struct platform_profile_ops *ops); int platform_profile_cycle(void); -void platform_profile_notify(void); +void platform_profile_notify(struct device *dev); #endif /*_PLATFORM_PROFILE_H_*/ diff --git a/include/linux/pldmfw.h b/include/linux/pldmfw.h index 0fc831338226..f5047983004f 100644 --- a/include/linux/pldmfw.h +++ b/include/linux/pldmfw.h @@ -125,9 +125,17 @@ struct pldmfw_ops; * a pointer to their own data, used to implement the device specific * operations. */ + +enum pldmfw_update_mode { + PLDMFW_UPDATE_MODE_FULL, + PLDMFW_UPDATE_MODE_SINGLE_COMPONENT, +}; + struct pldmfw { const struct pldmfw_ops *ops; struct device *dev; + u16 component_identifier; + enum pldmfw_update_mode mode; }; bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record); diff --git a/include/linux/pm.h b/include/linux/pm.h index e7f0260f15ad..08c37b83fea8 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -570,7 +570,8 @@ const struct dev_pm_ops name = { \ { .event = PM_EVENT_AUTO_RESUME, }) #define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0) - +#define PMSG_NO_WAKEUP(msg) (((msg).event & \ + (PM_EVENT_FREEZE | PM_EVENT_QUIESCE)) != 0) /* * Device run-time power management status. 
* diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 45646bfcaf1a..1aab31370065 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -147,6 +147,7 @@ struct genpd_governor_data { }; struct genpd_power_state { + const char *name; s64 power_off_latency_ns; s64 power_on_latency_ns; s64 residency_ns; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 568183e3e641..c247317aae38 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -100,8 +100,11 @@ struct dev_pm_opp_data { #if defined(CONFIG_PM_OPP) struct opp_table *dev_pm_opp_get_opp_table(struct device *dev); +void dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table); void dev_pm_opp_put_opp_table(struct opp_table *opp_table); +unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index); + unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies); @@ -158,6 +161,7 @@ struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev, unsigned int *bw, int index); +void dev_pm_opp_get(struct dev_pm_opp *opp); void dev_pm_opp_put(struct dev_pm_opp *opp); int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp); @@ -203,8 +207,15 @@ static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device * return ERR_PTR(-EOPNOTSUPP); } +static inline void dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table) {} + static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {} +static inline unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index) +{ + return 0; +} + static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) { return 0; @@ -334,6 +345,8 @@ static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev, return ERR_PTR(-EOPNOTSUPP); } +static inline void dev_pm_opp_get(struct dev_pm_opp *opp) {} + static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {} static inline int diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h index d9642c6cf852..25b63ed51b76 100644 --- a/include/linux/pm_wakeirq.h +++ b/include/linux/pm_wakeirq.h @@ -10,6 +10,7 @@ extern int dev_pm_set_wake_irq(struct device *dev, int irq); extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq); extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq); extern void dev_pm_clear_wake_irq(struct device *dev); +extern int devm_pm_set_wake_irq(struct device *dev, int irq); #else /* !CONFIG_PM */ @@ -32,5 +33,10 @@ static inline void dev_pm_clear_wake_irq(struct device *dev) { } +static inline int devm_pm_set_wake_irq(struct device *dev, int irq) +{ + return 0; +} + #endif /* CONFIG_PM */ #endif /* _LINUX_PM_WAKEIRQ_H */ diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 222f7530806c..d501c09c60cd 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -240,4 +240,21 @@ static inline int device_init_wakeup(struct device *dev, bool enable) return 0; } +static void device_disable_wakeup(void *dev) +{ + device_init_wakeup(dev, false); +} + +/** + * devm_device_init_wakeup - Resource managed device wakeup initialization. + * @dev: Device to handle. + * + * This function is the devm managed version of device_init_wakeup(dev, true). 
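+ *
+ * A typical probe() call site could look like this (sketch):
+ *
+ *   ret = devm_device_init_wakeup(&pdev->dev);
+ *   if (ret)
+ *           return dev_err_probe(&pdev->dev, ret,
+ *                                "failed to init wakeup\n");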
+ */ +static inline int devm_device_init_wakeup(struct device *dev) +{ + device_init_wakeup(dev, true); + return devm_add_action_or_reset(dev, device_disable_wakeup, dev); +} + #endif /* _LINUX_PM_WAKEUP_H */ diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h index fa9f08164c36..884040e1383b 100644 --- a/include/linux/pmbus.h +++ b/include/linux/pmbus.h @@ -73,6 +73,20 @@ */ #define PMBUS_USE_COEFFICIENTS_CMD BIT(5) +/* + * PMBUS_OP_PROTECTED + * Set if the chip OPERATION command is protected and protection is not + * determined by the standard WRITE_PROTECT command. + */ +#define PMBUS_OP_PROTECTED BIT(6) + +/* + * PMBUS_VOUT_PROTECTED + * Set if the chip VOUT_COMMAND command is protected and protection is not + * determined by the standard WRITE_PROTECT command. + */ +#define PMBUS_VOUT_PROTECTED BIT(7) + struct pmbus_platform_data { u32 flags; /* Device specific flags */ diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 5180dc9f1706..6b190639b08e 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -61,6 +61,7 @@ struct bq27xxx_device_info { struct bq27xxx_access_methods bus; struct bq27xxx_reg_cache cache; int charge_design_full; + int voltage_min_design; bool removed; unsigned long last_update; union power_supply_propval last_status; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index b98106e1a90f..6ed53b292162 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -15,6 +15,8 @@ #include <linux/device.h> #include <linux/workqueue.h> #include <linux/leds.h> +#include <linux/rwsem.h> +#include <linux/list.h> #include <linux/spinlock.h> #include <linux/notifier.h> @@ -40,7 +42,7 @@ enum { }; /* What algorithm is the charger using? 
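[Editor's note] The two devres wakeup helpers added above (devm_pm_set_wake_irq() in pm_wakeirq.h and devm_device_init_wakeup() in pm_wakeup.h) combine naturally in a probe path; a sketch with hypothetical foo_* names:

        #include <linux/platform_device.h>
        #include <linux/pm_wakeup.h>
        #include <linux/pm_wakeirq.h>

        static int foo_probe(struct platform_device *pdev)
        {
                struct device *dev = &pdev->dev;
                int irq, ret;

                irq = platform_get_irq(pdev, 0);
                if (irq < 0)
                        return irq;

                /* wakeup capability is now torn down automatically on unbind */
                ret = devm_device_init_wakeup(dev);
                if (ret)
                        return ret;

                /* likewise for the wake IRQ: no dev_pm_clear_wake_irq() in remove() */
                return devm_pm_set_wake_irq(dev, irq);
        }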
*/ -enum { +enum power_supply_charge_type { POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, POWER_SUPPLY_CHARGE_TYPE_NONE, POWER_SUPPLY_CHARGE_TYPE_TRICKLE, /* slow speed */ @@ -58,6 +60,7 @@ enum { POWER_SUPPLY_HEALTH_OVERHEAT, POWER_SUPPLY_HEALTH_DEAD, POWER_SUPPLY_HEALTH_OVERVOLTAGE, + POWER_SUPPLY_HEALTH_UNDERVOLTAGE, POWER_SUPPLY_HEALTH_UNSPEC_FAILURE, POWER_SUPPLY_HEALTH_COLD, POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE, @@ -99,6 +102,7 @@ enum power_supply_property { /* Properties of type `int' */ POWER_SUPPLY_PROP_STATUS = 0, POWER_SUPPLY_PROP_CHARGE_TYPE, + POWER_SUPPLY_PROP_CHARGE_TYPES, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_ONLINE, @@ -245,6 +249,7 @@ struct power_supply_desc { const char *name; enum power_supply_type type; u8 charge_behaviours; + u32 charge_types; u32 usb_types; const enum power_supply_property *properties; size_t num_properties; @@ -281,6 +286,28 @@ struct power_supply_desc { int use_for_apm; }; +struct power_supply_ext { + const char *const name; + u8 charge_behaviours; + const enum power_supply_property *properties; + size_t num_properties; + + int (*get_property)(struct power_supply *psy, + const struct power_supply_ext *ext, + void *data, + enum power_supply_property psp, + union power_supply_propval *val); + int (*set_property)(struct power_supply *psy, + const struct power_supply_ext *ext, + void *data, + enum power_supply_property psp, + const union power_supply_propval *val); + int (*property_is_writeable)(struct power_supply *psy, + const struct power_supply_ext *ext, + void *data, + enum power_supply_property psp); +}; + struct power_supply { const struct power_supply_desc *desc; @@ -300,10 +327,13 @@ struct power_supply { struct delayed_work deferred_register_work; spinlock_t changed_lock; bool changed; + bool update_groups; bool initialized; bool removing; atomic_t use_cnt; struct power_supply_battery_info *battery_info; + struct rw_semaphore extensions_sem; /* protects "extensions" */ + struct list_head extensions; #ifdef CONFIG_THERMAL struct thermal_zone_device *tzd; struct thermal_cooling_device *tcd; @@ -318,6 +348,8 @@ struct power_supply { #endif }; +#define dev_to_psy(__dev) container_of_const(__dev, struct power_supply, dev) + /* * This is recommended structure to specify static power supply parameters. * Generic one, parametrizable for different power supplies. 
Power supply @@ -878,10 +910,18 @@ devm_power_supply_register(struct device *parent, extern void power_supply_unregister(struct power_supply *psy); extern int power_supply_powers(struct power_supply *psy, struct device *dev); +extern int __must_check +power_supply_register_extension(struct power_supply *psy, + const struct power_supply_ext *ext, + struct device *dev, + void *data); +extern void power_supply_unregister_extension(struct power_supply *psy, + const struct power_supply_ext *ext); + #define to_power_supply(device) container_of(device, struct power_supply, dev) extern void *power_supply_get_drvdata(struct power_supply *psy); -extern int power_supply_for_each_device(void *data, int (*fn)(struct device *dev, void *data)); +extern int power_supply_for_each_psy(void *data, int (*fn)(struct power_supply *psy, void *data)); static inline bool power_supply_is_amp_property(enum power_supply_property psp) { @@ -944,6 +984,11 @@ ssize_t power_supply_charge_behaviour_show(struct device *dev, char *buf); int power_supply_charge_behaviour_parse(unsigned int available_behaviours, const char *buf); +ssize_t power_supply_charge_types_show(struct device *dev, + unsigned int available_types, + enum power_supply_charge_type current_type, + char *buf); +int power_supply_charge_types_parse(unsigned int available_types, const char *buf); #else static inline ssize_t power_supply_charge_behaviour_show(struct device *dev, @@ -959,6 +1004,20 @@ static inline int power_supply_charge_behaviour_parse(unsigned int available_beh { return -EOPNOTSUPP; } + +static inline +ssize_t power_supply_charge_types_show(struct device *dev, + unsigned int available_types, + enum power_supply_charge_type current_type, + char *buf) +{ + return -EOPNOTSUPP; +} + +static inline int power_supply_charge_types_parse(unsigned int available_types, const char *buf) +{ + return -EOPNOTSUPP; +} #endif #endif /* __LINUX_POWER_SUPPLY_H__ */ diff --git a/include/linux/pps_gen_kernel.h b/include/linux/pps_gen_kernel.h new file mode 100644 index 000000000000..022ea0ac4440 --- /dev/null +++ b/include/linux/pps_gen_kernel.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * PPS generator API kernel header + * + * Copyright (C) 2024 Rodolfo Giometti <giometti@enneenne.com> + */ + +#ifndef LINUX_PPS_GEN_KERNEL_H +#define LINUX_PPS_GEN_KERNEL_H + +#include <linux/pps_gen.h> +#include <linux/cdev.h> +#include <linux/device.h> + +/* + * Global defines + */ + +#define PPS_GEN_MAX_SOURCES 16 /* should be enough... */ + +struct pps_gen_device; + +/** + * struct pps_gen_source_info - the specific PPS generator info + * @use_system_clock: true, if the system clock is used to generate pulses + * @get_time: query the time stored into the generator clock + * @enable: enable/disable the PPS pulses generation + * + * This is the main generator struct where all needed information must be + * placed before calling the pps_gen_register_source(). 
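[Editor's note] The extension mechanism added above (struct power_supply_ext plus power_supply_register_extension()) lets an auxiliary driver bolt extra properties onto an existing supply. A hedged sketch; the foo_* names, sensor type, and temperature helper are hypothetical:

        static const enum power_supply_property foo_ext_props[] = {
                POWER_SUPPLY_PROP_TEMP,
        };

        static int foo_ext_get_property(struct power_supply *psy,
                                        const struct power_supply_ext *ext,
                                        void *data, enum power_supply_property psp,
                                        union power_supply_propval *val)
        {
                struct foo_sensor *sensor = data;   /* the @data passed at registration */

                if (psp != POWER_SUPPLY_PROP_TEMP)
                        return -EINVAL;
                val->intval = foo_sensor_read_temp(sensor);   /* hypothetical helper */
                return 0;
        }

        static const struct power_supply_ext foo_ext = {
                .name           = "foo-temperature",
                .properties     = foo_ext_props,
                .num_properties = ARRAY_SIZE(foo_ext_props),
                .get_property   = foo_ext_get_property,
        };

        /* @dev ties the extension's lifetime and attributes to the extending driver */
        ret = power_supply_register_extension(psy, &foo_ext, dev, sensor);
        if (ret)
                return ret;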
+ */ +struct pps_gen_source_info { + bool use_system_clock; + + int (*get_time)(struct pps_gen_device *pps_gen, + struct timespec64 *time); + int (*enable)(struct pps_gen_device *pps_gen, bool enable); + +/* private: internal use only */ + struct module *owner; + struct device *parent; /* for device_create */ +}; + +/* The main struct */ +struct pps_gen_device { + struct pps_gen_source_info info; /* PPS generator info */ + bool enabled; /* PPS generator status */ + + unsigned int event; + unsigned int sequence; + + unsigned int last_ev; /* last PPS event id */ + wait_queue_head_t queue; /* PPS event queue */ + + unsigned int id; /* PPS generator unique ID */ + struct cdev cdev; + struct device *dev; + struct fasync_struct *async_queue; /* fasync method */ + spinlock_t lock; +}; + +/* + * Global variables + */ + +extern const struct attribute_group *pps_gen_groups[]; + +/* + * Exported functions + */ + +extern struct pps_gen_device *pps_gen_register_source( + struct pps_gen_source_info *info); +extern void pps_gen_unregister_source(struct pps_gen_device *pps_gen); +extern void pps_gen_event(struct pps_gen_device *pps_gen, + unsigned int event, void *data); + +#endif /* LINUX_PPS_GEN_KERNEL_H */ diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h index 78c8ac4951b5..c7abce28ed29 100644 --- a/include/linux/pps_kernel.h +++ b/include/linux/pps_kernel.h @@ -56,8 +56,7 @@ struct pps_device { unsigned int id; /* PPS source unique ID */ void const *lookup_cookie; /* For pps_lookup_dev() only */ - struct cdev cdev; - struct device *dev; + struct device dev; struct fasync_struct *async_queue; /* fasync method */ spinlock_t lock; }; diff --git a/include/linux/prmt.h b/include/linux/prmt.h index 9c094294403f..c53ab287e932 100644 --- a/include/linux/prmt.h +++ b/include/linux/prmt.h @@ -1,5 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ +#include <linux/uuid.h> + #ifdef CONFIG_ACPI_PRMT void init_prmt(void); int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer); diff --git a/include/linux/property.h b/include/linux/property.h index 61fc20e5f81f..e214ecd241eb 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -37,6 +37,7 @@ struct fwnode_handle *__dev_fwnode(struct device *dev); struct device *: __dev_fwnode)(dev) bool device_property_present(const struct device *dev, const char *propname); +bool device_property_read_bool(const struct device *dev, const char *propname); int device_property_read_u8_array(const struct device *dev, const char *propname, u8 *val, size_t nval); int device_property_read_u16_array(const struct device *dev, const char *propname, @@ -54,6 +55,8 @@ int device_property_match_string(const struct device *dev, bool fwnode_property_present(const struct fwnode_handle *fwnode, const char *propname); +bool fwnode_property_read_bool(const struct fwnode_handle *fwnode, + const char *propname); int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode, const char *propname, u8 *val, size_t nval); @@ -207,12 +210,6 @@ int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name); unsigned int device_get_child_node_count(const struct device *dev); -static inline bool device_property_read_bool(const struct device *dev, - const char *propname) -{ - return device_property_present(dev, propname); -} - static inline int device_property_read_u8(const struct device *dev, const char *propname, u8 *val) { @@ -263,12 +260,6 @@ static inline int device_property_string_array_count(const struct device *dev, return
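[Editor's note] Returning to the PPS generator API above: a sketch of how a driver might register a source. The foo_* names are hypothetical and the ERR_PTR() return convention of pps_gen_register_source() is assumed:

        #include <linux/pps_gen_kernel.h>
        #include <linux/timekeeping.h>

        static int foo_get_time(struct pps_gen_device *pps_gen, struct timespec64 *time)
        {
                ktime_get_real_ts64(time);      /* generator follows the system clock */
                return 0;
        }

        static int foo_enable(struct pps_gen_device *pps_gen, bool enable)
        {
                return foo_hw_pulse_ctrl(enable);   /* hypothetical hardware hook */
        }

        static struct pps_gen_source_info foo_gen_info = {
                .use_system_clock = true,
                .get_time         = foo_get_time,
                .enable           = foo_enable,
        };

        pps_gen = pps_gen_register_source(&foo_gen_info);
        if (IS_ERR(pps_gen))
                return PTR_ERR(pps_gen);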
device_property_read_string_array(dev, propname, NULL, 0); } -static inline bool fwnode_property_read_bool(const struct fwnode_handle *fwnode, - const char *propname) -{ - return fwnode_property_present(fwnode, propname); -} - static inline int fwnode_property_read_u8(const struct fwnode_handle *fwnode, const char *propname, u8 *val) { diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h index 591a53e082e6..c773eeb92d04 100644 --- a/include/linux/pse-pd/pse.h +++ b/include/linux/pse-pd/pse.h @@ -5,15 +5,37 @@ #ifndef _LINUX_PSE_CONTROLLER_H #define _LINUX_PSE_CONTROLLER_H -#include <linux/ethtool.h> #include <linux/list.h> #include <uapi/linux/ethtool.h> /* Maximum current in uA according to IEEE 802.3-2022 Table 145-1 */ #define MAX_PI_CURRENT 1920000 +/* Maximum power in mW according to IEEE 802.3-2022 Table 145-16 */ +#define MAX_PI_PW 99900 struct phy_device; struct pse_controller_dev; +struct netlink_ext_ack; + +/* C33 PSE extended state and substate. */ +struct ethtool_c33_pse_ext_state_info { + enum ethtool_c33_pse_ext_state c33_pse_ext_state; + union { + enum ethtool_c33_pse_ext_substate_error_condition error_condition; + enum ethtool_c33_pse_ext_substate_mr_pse_enable mr_pse_enable; + enum ethtool_c33_pse_ext_substate_option_detect_ted option_detect_ted; + enum ethtool_c33_pse_ext_substate_option_vport_lim option_vport_lim; + enum ethtool_c33_pse_ext_substate_ovld_detected ovld_detected; + enum ethtool_c33_pse_ext_substate_power_not_available power_not_available; + enum ethtool_c33_pse_ext_substate_short_detected short_detected; + u32 __c33_pse_ext_substate; + }; +}; + +struct ethtool_c33_pse_pw_limit_range { + u32 min; + u32 max; +}; /** * struct pse_control_config - PSE control/channel configuration. @@ -29,7 +51,52 @@ struct pse_control_config { }; /** - * struct pse_control_status - PSE control/channel status. + * struct pse_admin_state - PSE operational state + * + * @podl_admin_state: operational state of the PoDL PSE + * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState + * @c33_admin_state: operational state of the PSE + * functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState + */ +struct pse_admin_state { + enum ethtool_podl_pse_admin_state podl_admin_state; + enum ethtool_c33_pse_admin_state c33_admin_state; +}; + +/** + * struct pse_pw_status - PSE power detection status + * + * @podl_pw_status: power detection status of the PoDL PSE. + * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus: + * @c33_pw_status: power detection status of the PSE. + * IEEE 802.3-2022 30.9.1.1.5 aPSEPowerDetectionStatus: + */ +struct pse_pw_status { + enum ethtool_podl_pse_pw_d_status podl_pw_status; + enum ethtool_c33_pse_pw_d_status c33_pw_status; +}; + +/** + * struct pse_ext_state_info - PSE extended state information + * + * @c33_ext_state_info: extended state information of the PSE + */ +struct pse_ext_state_info { + struct ethtool_c33_pse_ext_state_info c33_ext_state_info; +}; + +/** + * struct pse_pw_limit_ranges - PSE power limit configuration range + * + * @c33_pw_limit_ranges: supported power limit configuration range. The driver + * is in charge of the memory allocation. + */ +struct pse_pw_limit_ranges { + struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges; +}; + +/** + * struct ethtool_pse_control_status - PSE control/channel status. * * @podl_admin_state: operational state of the PoDL PSE * functions. 
IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState @@ -47,11 +114,11 @@ struct pse_control_config { * @c33_avail_pw_limit: available power limit of the PSE in mW * IEEE 802.3-2022 145.2.5.4 pse_avail_pwr * @c33_pw_limit_ranges: supported power limit configuration range. The driver - * is in charge of the memory allocation. + * is in charge of the memory allocation * @c33_pw_limit_nb_ranges: number of supported power limit configuration * ranges */ -struct pse_control_status { +struct ethtool_pse_control_status { enum ethtool_podl_pse_admin_state podl_admin_state; enum ethtool_podl_pse_pw_d_status podl_pw_status; enum ethtool_c33_pse_admin_state c33_admin_state; @@ -67,40 +134,51 @@ struct pse_control_status { /** * struct pse_controller_ops - PSE controller driver callbacks * - * @ethtool_get_status: get PSE control status for ethtool interface * @setup_pi_matrix: setup PI matrix of the PSE controller - * @pi_is_enabled: Return 1 if the PSE PI is enabled, 0 if not. - * May also return negative errno. + * @pi_get_admin_state: Get the operational state of the PSE PI. This ops + * is mandatory. + * @pi_get_pw_status: Get the power detection status of the PSE PI. This + * ops is mandatory. + * @pi_get_ext_state: Get the extended state of the PSE PI. + * @pi_get_pw_class: Get the power class of the PSE PI. + * @pi_get_actual_pw: Get actual power of the PSE PI in mW. * @pi_enable: Configure the PSE PI as enabled. * @pi_disable: Configure the PSE PI as disabled. * @pi_get_voltage: Return voltage similarly to get_voltage regulator - * callback. - * @pi_get_current_limit: Get the configured current limit similarly to - * get_current_limit regulator callback. - * @pi_set_current_limit: Configure the current limit similarly to - * set_current_limit regulator callback. - * Should not return an error in case of MAX_PI_CURRENT - * current value set. + * callback in uV. + * @pi_get_pw_limit: Get the configured power limit of the PSE PI in mW. + * @pi_set_pw_limit: Configure the power limit of the PSE PI in mW. + * @pi_get_pw_limit_ranges: Get the supported power limit configuration + * range. The driver is in charge of the memory + * allocation and should return the number of + * ranges. 
*/ struct pse_controller_ops { - int (*ethtool_get_status)(struct pse_controller_dev *pcdev, - unsigned long id, struct netlink_ext_ack *extack, - struct pse_control_status *status); int (*setup_pi_matrix)(struct pse_controller_dev *pcdev); - int (*pi_is_enabled)(struct pse_controller_dev *pcdev, int id); + int (*pi_get_admin_state)(struct pse_controller_dev *pcdev, int id, + struct pse_admin_state *admin_state); + int (*pi_get_pw_status)(struct pse_controller_dev *pcdev, int id, + struct pse_pw_status *pw_status); + int (*pi_get_ext_state)(struct pse_controller_dev *pcdev, int id, + struct pse_ext_state_info *ext_state_info); + int (*pi_get_pw_class)(struct pse_controller_dev *pcdev, int id); + int (*pi_get_actual_pw)(struct pse_controller_dev *pcdev, int id); int (*pi_enable)(struct pse_controller_dev *pcdev, int id); int (*pi_disable)(struct pse_controller_dev *pcdev, int id); int (*pi_get_voltage)(struct pse_controller_dev *pcdev, int id); - int (*pi_get_current_limit)(struct pse_controller_dev *pcdev, - int id); - int (*pi_set_current_limit)(struct pse_controller_dev *pcdev, - int id, int max_uA); + int (*pi_get_pw_limit)(struct pse_controller_dev *pcdev, + int id); + int (*pi_set_pw_limit)(struct pse_controller_dev *pcdev, + int id, int max_mW); + int (*pi_get_pw_limit_ranges)(struct pse_controller_dev *pcdev, int id, + struct pse_pw_limit_ranges *pw_limit_ranges); }; struct module; struct device_node; struct of_phandle_args; struct pse_control; +struct ethtool_pse_control_status; /* PSE PI pairset pinout can either be Alternative A or Alternative B */ enum pse_pi_pairset_pinout { @@ -177,15 +255,13 @@ void pse_control_put(struct pse_control *psec); int pse_ethtool_get_status(struct pse_control *psec, struct netlink_ext_ack *extack, - struct pse_control_status *status); + struct ethtool_pse_control_status *status); int pse_ethtool_set_config(struct pse_control *psec, struct netlink_ext_ack *extack, const struct pse_control_config *config); int pse_ethtool_set_pw_limit(struct pse_control *psec, struct netlink_ext_ack *extack, const unsigned int pw_limit); -int pse_ethtool_get_pw_limit(struct pse_control *psec, - struct netlink_ext_ack *extack); bool pse_has_podl(struct pse_control *psec); bool pse_has_c33(struct pse_control *psec); @@ -203,7 +279,7 @@ static inline void pse_control_put(struct pse_control *psec) static inline int pse_ethtool_get_status(struct pse_control *psec, struct netlink_ext_ack *extack, - struct pse_control_status *status) + struct ethtool_pse_control_status *status) { return -EOPNOTSUPP; } @@ -222,12 +298,6 @@ static inline int pse_ethtool_set_pw_limit(struct pse_control *psec, return -EOPNOTSUPP; } -static inline int pse_ethtool_get_pw_limit(struct pse_control *psec, - struct netlink_ext_ack *extack) -{ - return -EOPNOTSUPP; -} - static inline bool pse_has_podl(struct pse_control *psec) { return false; diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h index 730f77381d55..2503f7625d65 100644 --- a/include/linux/pseudo_fs.h +++ b/include/linux/pseudo_fs.h @@ -5,6 +5,7 @@ struct pseudo_fs_context { const struct super_operations *ops; + const struct export_operations *eops; const struct xattr_handler * const *xattr; const struct dentry_operations *dops; unsigned long magic; diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index c892d22ce0a7..0d68d09bedd1 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -307,7 +307,7 @@ static inline u64 adjust_by_scaled_ppm(u64 base, long 
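[Editor's note] With ethtool_get_status gone, a PSE driver now implements the split per-PI callbacks above. A sketch of the two mandatory ones; the foo_* hardware accessors are hypothetical, while the ETHTOOL_C33_* values come from the uapi ethtool header:

        static int foo_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
                                          struct pse_admin_state *admin_state)
        {
                admin_state->c33_admin_state = foo_hw_pi_enabled(pcdev, id) ?
                        ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED :
                        ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
                return 0;
        }

        static int foo_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
                                        struct pse_pw_status *pw_status)
        {
                pw_status->c33_pw_status = foo_hw_pi_delivering(pcdev, id) ?
                        ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING :
                        ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
                return 0;
        }

        static const struct pse_controller_ops foo_pse_ops = {
                .pi_get_admin_state = foo_pi_get_admin_state,
                .pi_get_pw_status   = foo_pi_get_pw_status,
                .pi_enable          = foo_pi_enable,    /* hypothetical */
                .pi_disable         = foo_pi_disable,   /* hypothetical */
        };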
scaled_ppm) * @info: Structure describing the new clock. * @parent: Pointer to the parent device of the new clock. * - * Returns a valid pointer on success or PTR_ERR on failure. If PHC + * Returns: a valid pointer on success or PTR_ERR on failure. If PHC * support is missing at the configuration level, this function * returns NULL, and drivers are expected to gracefully handle that * case separately. @@ -445,7 +445,7 @@ int ptp_get_vclocks_index(int pclock_index, int **vclock_index); * @hwtstamp: timestamp * @vclock_index: phc index of ptp vclock. * - * Returns converted timestamp, or 0 on error. + * Returns: converted timestamp, or 0 on error. */ ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index); #else diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index fd037c127bb0..551329220e4f 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -615,15 +615,14 @@ static inline int ptr_ring_resize_noprof(struct ptr_ring *r, int size, gfp_t gfp /* * Note: producer lock is nested within consumer lock, so if you * resize you must make sure all uses nest correctly. - * In particular if you consume ring in interrupt or BH context, you must - * disable interrupts/BH when doing so. + * In particular if you consume ring in BH context, you must + * disable BH when doing so. */ -static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings, - unsigned int nrings, - int size, - gfp_t gfp, void (*destroy)(void *)) +static inline int ptr_ring_resize_multiple_bh_noprof(struct ptr_ring **rings, + unsigned int nrings, + int size, gfp_t gfp, + void (*destroy)(void *)) { - unsigned long flags; void ***queues; int i; @@ -638,12 +637,12 @@ static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings, } for (i = 0; i < nrings; ++i) { - spin_lock_irqsave(&(rings[i])->consumer_lock, flags); + spin_lock_bh(&(rings[i])->consumer_lock); spin_lock(&(rings[i])->producer_lock); queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], size, gfp, destroy); spin_unlock(&(rings[i])->producer_lock); - spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); + spin_unlock_bh(&(rings[i])->consumer_lock); } for (i = 0; i < nrings; ++i) @@ -662,8 +661,8 @@ nomem: noqueues: return -ENOMEM; } -#define ptr_ring_resize_multiple(...) \ - alloc_hooks(ptr_ring_resize_multiple_noprof(__VA_ARGS__)) +#define ptr_ring_resize_multiple_bh(...) \ + alloc_hooks(ptr_ring_resize_multiple_bh_noprof(__VA_ARGS__)) static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) { diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 6853e29d9674..a2df509056ac 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -347,6 +347,23 @@ struct pwm_chip { struct pwm_device pwms[] __counted_by(npwm); }; +/** + * pwmchip_supports_waveform() - checks if the given chip supports waveform callbacks + * @chip: The pwm_chip to test + * + * Returns true iff the pwm chip supports the waveform functions like + * pwm_set_waveform_might_sleep() and pwm_round_waveform_might_sleep() + */ +static inline bool pwmchip_supports_waveform(struct pwm_chip *chip) +{ + /* + * only check for .write_waveform(). If that is available, + * .round_waveform_tohw() and .round_waveform_fromhw() are asserted to be + * available, too, in pwmchip_add(). 
+ */ + return chip->ops->write_waveform != NULL; +} + static inline struct device *pwmchip_parent(const struct pwm_chip *chip) { return chip->dev.parent; diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index 7c173aa64e1e..8d2ba3749866 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -211,6 +211,43 @@ rb_add(struct rb_node *node, struct rb_root *tree, } /** + * rb_find_add_cached() - find equivalent @node in @tree, or add @node + * @node: node to look-for / insert + * @tree: tree to search / modify + * @cmp: operator defining the node order + * + * Returns the rb_node matching @node, or NULL when no match is found and @node + * is inserted. + */ +static __always_inline struct rb_node * +rb_find_add_cached(struct rb_node *node, struct rb_root_cached *tree, + int (*cmp)(const struct rb_node *new, const struct rb_node *exist)) +{ + bool leftmost = true; + struct rb_node **link = &tree->rb_root.rb_node; + struct rb_node *parent = NULL; + int c; + + while (*link) { + parent = *link; + c = cmp(node, parent); + + if (c < 0) { + link = &parent->rb_left; + } else if (c > 0) { + link = &parent->rb_right; + leftmost = false; + } else { + return parent; + } + } + + rb_link_node(node, parent, link); + rb_insert_color_cached(node, tree, leftmost); + return NULL; +} + +/** * rb_find_add() - find equivalent @node in @tree, or add @node * @node: node to look-for / insert * @tree: tree to search / modify diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 14dfa6008467..1b11926ddd47 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -30,6 +30,17 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) * way, we must not access it directly */ #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) +/* + * Return the ->prev pointer of a list_head in an rcu safe way. Don't + * access it directly. + * + * Any list traversed with list_bidir_prev_rcu() must never use + * list_del_rcu(). Doing so will poison the ->prev pointer that + * list_bidir_prev_rcu() relies on, which will result in segfaults. + * To prevent these segfaults, use list_bidir_del_rcu() instead + * of list_del_rcu(). + */ +#define list_bidir_prev_rcu(list) (*((struct list_head __rcu **)(&(list)->prev))) /** * list_tail_rcu - returns the prev pointer of the head of the list @@ -159,6 +170,39 @@ static inline void list_del_rcu(struct list_head *entry) } /** + * list_bidir_del_rcu - deletes entry from list without re-initialization + * @entry: the element to delete from the list. + * + * In contrast to list_del_rcu() doesn't poison the prev pointer thus + * allowing backwards traversal via list_bidir_prev_rcu(). + * + * Note: list_empty() on entry does not return true after this because + * the entry is in a special undefined state that permits RCU-based + * lockfree reverse traversal. In particular this means that we can not + * poison the forward and backwards pointers that may still be used for + * walking the list. + * + * The caller must take whatever precautions are necessary (such as + * holding appropriate locks) to avoid racing with another list-mutation + * primitive, such as list_bidir_del_rcu() or list_add_rcu(), running on + * this same list. However, it is perfectly legal to run concurrently + * with the _rcu list-traversal primitives, such as + * list_for_each_entry_rcu(). + * + * Note that list_del_rcu() and list_bidir_del_rcu() must not be used on + * the same list. 
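[Editor's note] Usage of the new rb_find_add_cached() mirrors rb_find_add() but keeps the leftmost node cached; a sketch with a hypothetical keyed node type:

        struct foo_node {
                struct rb_node rb;
                u64 key;
        };

        static int foo_cmp(const struct rb_node *new, const struct rb_node *exist)
        {
                const struct foo_node *n = rb_entry(new, struct foo_node, rb);
                const struct foo_node *e = rb_entry(exist, struct foo_node, rb);

                return n->key < e->key ? -1 : n->key > e->key ? 1 : 0;
        }

        struct rb_root_cached tree = RB_ROOT_CACHED;

        /* NULL means @new went in; otherwise the existing node with the same key */
        struct rb_node *dup = rb_find_add_cached(&new->rb, &tree, foo_cmp);

        /* the smallest key stays O(1) reachable */
        struct foo_node *min = rb_entry(rb_first_cached(&tree), struct foo_node, rb);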
+ * + * Note that the caller is not permitted to immediately free + * the newly deleted entry. Instead, either synchronize_rcu() + * or call_rcu() must be used to defer freeing until an RCU + * grace period has elapsed. + */ +static inline void list_bidir_del_rcu(struct list_head *entry) +{ + __list_del_entry(entry); +} + +/** * hlist_del_init_rcu - deletes entry from hash list with re-initialization * @n: the element to delete from the hash list. * diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h index 303ab9bee155..f9bed3d3f78d 100644 --- a/include/linux/rcupdate_wait.h +++ b/include/linux/rcupdate_wait.h @@ -65,4 +65,15 @@ static inline void cond_resched_rcu(void) #endif } +// Has the current task blocked within its current RCU read-side +// critical section? +static inline bool has_rcu_reader_blocked(void) +{ +#ifdef CONFIG_PREEMPT_RCU + return !list_empty(¤t->rcu_node_entry); +#else + return false; +#endif +} + #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ diff --git a/include/linux/regmap.h b/include/linux/regmap.h index fd41baccbf3e..3a96d068915f 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -506,6 +506,32 @@ struct regmap_range_cfg { unsigned int window_len; }; +/** + * struct regmap_sdw_mbq_cfg - Configuration for Multi-Byte Quantities + * + * @mbq_size: Callback returning the actual size of the given register. + * @deferrable: Callback returning true if the hardware can defer + * transactions to the given register. Deferral should + * only be used by SDCA parts and typically which controls + * are deferrable will be specified in either as a hard + * coded list or from the DisCo tables in the platform + * firmware. + * + * @timeout_us: The time in microseconds after which waiting for a deferred + * transaction should time out. + * @retry_us: The time in microseconds between polls of the function busy + * status whilst waiting for an opportunity to retry a deferred + * transaction. + * + * Provides additional configuration required for SoundWire MBQ register maps. 
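[Editor's note] Pairing the two new rculist helpers: deletions go through list_bidir_del_rcu() so that readers may still walk backwards via list_bidir_prev_rcu(). A hedged sketch; the foo list, lock, rcu_head member, and callback are hypothetical, and the rcu_dereference() wrapping follows the list_next_rcu() pattern:

        static void foo_remove(struct foo *item)
        {
                spin_lock(&foo_lock);
                list_bidir_del_rcu(&item->entry);
                spin_unlock(&foo_lock);
                kfree_rcu(item, rcu);   /* deferred free, as required above */
        }

        static void foo_walk_backwards(void)
        {
                struct list_head *pos;

                rcu_read_lock();
                for (pos = rcu_dereference(list_bidir_prev_rcu(&foo_list));
                     pos != &foo_list;
                     pos = rcu_dereference(list_bidir_prev_rcu(pos)))
                        foo_process(list_entry(pos, struct foo, entry));
                rcu_read_unlock();
        }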
+ */ +struct regmap_sdw_mbq_cfg { + int (*mbq_size)(struct device *dev, unsigned int reg); + bool (*deferrable)(struct device *dev, unsigned int reg); + unsigned long timeout_us; + unsigned long retry_us; +}; + struct regmap_async; typedef int (*regmap_hw_write)(void *context, const void *data, @@ -652,6 +678,7 @@ struct regmap *__regmap_init_sdw(struct sdw_slave *sdw, const char *lock_name); struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw, const struct regmap_config *config, + const struct regmap_sdw_mbq_cfg *mbq_config, struct lock_class_key *lock_key, const char *lock_name); struct regmap *__regmap_init_spi_avmm(struct spi_device *spi, @@ -713,6 +740,7 @@ struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw, const char *lock_name); struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw, const struct regmap_config *config, + const struct regmap_sdw_mbq_cfg *mbq_config, struct lock_class_key *lock_key, const char *lock_name); struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus, @@ -942,7 +970,22 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); */ #define regmap_init_sdw_mbq(sdw, config) \ __regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \ - sdw, config) + sdw, config, NULL) + +/** + * regmap_init_sdw_mbq_cfg() - Initialise MBQ SDW register map with config + * + * @sdw: Device that will be interacted with + * @config: Configuration for register map + * @mbq_config: Properties for the MBQ registers + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +#define regmap_init_sdw_mbq_cfg(sdw, config, mbq_config) \ + __regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \ + sdw, config, mbq_config) /** * regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave @@ -1155,7 +1198,22 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); */ #define devm_regmap_init_sdw_mbq(sdw, config) \ __regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, #config, \ - sdw, config) + sdw, config, NULL) + +/** + * devm_regmap_init_sdw_mbq_cfg() - Initialise managed MBQ SDW register map with config + * + * @sdw: Device that will be interacted with + * @config: Configuration for register map + * @mbq_config: Properties for the MBQ registers + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. 
+ */ +#define devm_regmap_init_sdw_mbq_cfg(sdw, config, mbq_config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, \ + #config, sdw, config, mbq_config) /** * devm_regmap_init_slimbus() - Initialise managed register map @@ -1244,7 +1302,7 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg, void *val, size_t val_len); int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, size_t val_count); -int regmap_multi_reg_read(struct regmap *map, unsigned int *reg, void *val, +int regmap_multi_reg_read(struct regmap *map, const unsigned int *reg, void *val, size_t val_count); int regmap_update_bits_base(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val, diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index bcba3935c6f9..ffe912f345ae 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -233,6 +233,11 @@ int regulator_sync_voltage(struct regulator *regulator); int regulator_set_current_limit(struct regulator *regulator, int min_uA, int max_uA); int regulator_get_current_limit(struct regulator *regulator); +int regulator_get_unclaimed_power_budget(struct regulator *regulator); +int regulator_request_power_budget(struct regulator *regulator, + unsigned int pw_req); +void regulator_free_power_budget(struct regulator *regulator, + unsigned int pw); int regulator_set_mode(struct regulator *regulator, unsigned int mode); unsigned int regulator_get_mode(struct regulator *regulator); @@ -526,6 +531,22 @@ static inline int regulator_get_current_limit(struct regulator *regulator) return 0; } +static inline int regulator_get_unclaimed_power_budget(struct regulator *regulator) +{ + return INT_MAX; +} + +static inline int regulator_request_power_budget(struct regulator *regulator, + unsigned int pw_req) +{ + return -EOPNOTSUPP; +} + +static inline void regulator_free_power_budget(struct regulator *regulator, + unsigned int pw) +{ +} + static inline int regulator_set_mode(struct regulator *regulator, unsigned int mode) { diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 5b66caf1695d..4a216fdba354 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -656,6 +656,8 @@ struct regulator_dev { int cached_err; bool use_cached_err; spinlock_t err_lock; + + int pw_requested_mW; }; /* diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index b3db09a7429b..1fc440c5c4c7 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -113,6 +113,7 @@ struct notification_limit { * @min_uA: Smallest current consumers may set. * @max_uA: Largest current consumers may set. * @ilim_uA: Maximum input current. + * @pw_budget_mW: Power budget for the regulator in mW. * @system_load: Load that isn't captured by any consumer requests. * * @over_curr_limits: Limits for acting on over current. 
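[Editor's note] A sketch of the new _cfg variant in a SoundWire codec driver; the register-layout helpers, regmap config, and timing values below are hypothetical placeholders:

        static int foo_mbq_size(struct device *dev, unsigned int reg)
        {
                return foo_reg_is_16bit(reg) ? 2 : 1;   /* hypothetical layout query */
        }

        static bool foo_deferrable(struct device *dev, unsigned int reg)
        {
                return foo_reg_in_defer_list(reg);      /* e.g. built from DisCo tables */
        }

        static const struct regmap_sdw_mbq_cfg foo_mbq_cfg = {
                .mbq_size   = foo_mbq_size,
                .deferrable = foo_deferrable,
                .timeout_us = 10 * USEC_PER_MSEC,
                .retry_us   = 100,
        };

        regmap = devm_regmap_init_sdw_mbq_cfg(slave, &foo_regmap_config, &foo_mbq_cfg);
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);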
@@ -185,6 +186,7 @@ struct regulation_constraints { int max_uA; int ilim_uA; + int pw_budget_mW; int system_load; /* used for coupled regulators */ diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h index 243633c8dceb..b427b5873de1 100644 --- a/include/linux/regulator/pca9450.h +++ b/include/linux/regulator/pca9450.h @@ -10,6 +10,7 @@ enum pca9450_chip_type { PCA9450_TYPE_PCA9450A = 0, PCA9450_TYPE_PCA9450BC, PCA9450_TYPE_PCA9451A, + PCA9450_TYPE_PCA9452, PCA9450_TYPE_AMOUNT, }; diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index 997b34197385..6816e4c5f3f0 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -241,7 +241,7 @@ bool rfkill_soft_blocked(struct rfkill *rfkill); * rfkill_find_type - Helper for finding rfkill type by name * @name: the name of the type * - * Returns enum rfkill_type that corresponds to the name. + * Returns: enum rfkill_type that corresponds to the name. */ enum rfkill_type rfkill_find_type(const char *name); diff --git a/include/linux/rolling_buffer.h b/include/linux/rolling_buffer.h new file mode 100644 index 000000000000..ac15b1ffdd83 --- /dev/null +++ b/include/linux/rolling_buffer.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Rolling buffer of folios + * + * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _ROLLING_BUFFER_H +#define _ROLLING_BUFFER_H + +#include <linux/folio_queue.h> +#include <linux/uio.h> + +/* + * Rolling buffer. Whilst the buffer is live and in use, folios and folio + * queue segments can be added to one end by one thread and removed from the + * other end by another thread. The buffer isn't allowed to be empty; it must + * always have at least one folio_queue in it so that neither side has to + * modify both queue pointers. + * + * The iterator in the buffer is extended as buffers are inserted. It can be + * snapshotted to use a segment of the buffer. + */ +struct rolling_buffer { + struct folio_queue *head; /* Producer's insertion point */ + struct folio_queue *tail; /* Consumer's removal point */ + struct iov_iter iter; /* Iterator tracking what's left in the buffer */ + u8 next_head_slot; /* Next slot in ->head */ + u8 first_tail_slot; /* First slot in ->tail */ +}; + +/* + * Snapshot of a rolling buffer. + */ +struct rolling_buffer_snapshot { + struct folio_queue *curr_folioq; /* Queue segment in which current folio resides */ + unsigned char curr_slot; /* Folio currently being read */ + unsigned char curr_order; /* Order of folio */ +}; + +/* Marks to store per-folio in the internal folio_queue structs. 
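[Editor's note] The regulator power-budget calls introduced above pair up around a high-load operation; units are taken to be mW, matching the pw_budget_mW constraint. A hedged consumer sketch:

        /* claim 1500 mW from the supply before enabling a power-hungry block */
        ret = regulator_request_power_budget(regulator, 1500);
        if (ret) {
                dev_warn(dev, "budget denied, only %d mW unclaimed\n",
                         regulator_get_unclaimed_power_budget(regulator));
                return ret;
        }

        foo_run_heavy_workload();       /* hypothetical */

        /* hand the budget back so other consumers can claim it */
        regulator_free_power_budget(regulator, 1500);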
*/ +#define ROLLBUF_MARK_1 BIT(0) +#define ROLLBUF_MARK_2 BIT(1) + +int rolling_buffer_init(struct rolling_buffer *roll, unsigned int rreq_id, + unsigned int direction); +int rolling_buffer_make_space(struct rolling_buffer *roll); +ssize_t rolling_buffer_load_from_ra(struct rolling_buffer *roll, + struct readahead_control *ractl, + struct folio_batch *put_batch); +ssize_t rolling_buffer_append(struct rolling_buffer *roll, struct folio *folio, + unsigned int flags); +struct folio_queue *rolling_buffer_delete_spent(struct rolling_buffer *roll); +void rolling_buffer_clear(struct rolling_buffer *roll); + +static inline void rolling_buffer_advance(struct rolling_buffer *roll, size_t amount) +{ + iov_iter_advance(&roll->iter, amount); +} + +#endif /* _ROLLING_BUFFER_H */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 14b88f551920..4bc2ee0b10b0 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -78,7 +78,7 @@ static inline bool lockdep_rtnl_is_held(void) * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL * @p: The pointer to read, prior to dereferencing * - * Return the value of the specified RCU-protected pointer, but omit + * Return: the value of the specified RCU-protected pointer, but omit * the READ_ONCE(), because caller holds RTNL. */ #define rtnl_dereference(p) \ @@ -102,6 +102,7 @@ void __rtnl_net_unlock(struct net *net); void rtnl_net_lock(struct net *net); void rtnl_net_unlock(struct net *net); int rtnl_net_trylock(struct net *net); +int rtnl_net_lock_killable(struct net *net); int rtnl_net_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b); bool rtnl_net_is_locked(struct net *net); @@ -138,6 +139,11 @@ static inline int rtnl_net_trylock(struct net *net) return rtnl_trylock(); } +static inline int rtnl_net_lock_killable(struct net *net) +{ + return rtnl_lock_killable(); +} + static inline void ASSERT_RTNL_NET(struct net *net) { ASSERT_RTNL(); @@ -178,6 +184,12 @@ void rtnetlink_init(void); void __rtnl_unlock(void); void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); +/* Shared by rtnl_fdb_dump() and various ndo_fdb_dump() helpers. */ +struct ndo_fdb_dump_context { + unsigned long ifindex; + unsigned long fdb_idx; +}; + extern int ndo_dflt_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, diff --git a/include/linux/sched.h b/include/linux/sched.h index 64934e0830af..9632e3318e0d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -398,6 +398,12 @@ struct sched_info { /* Time spent waiting on a runqueue: */ unsigned long long run_delay; + /* Max time spent waiting on a runqueue: */ + unsigned long long max_run_delay; + + /* Min time spent waiting on a runqueue: */ + unsigned long long min_run_delay; + /* Timestamps: */ /* When did we last run on a CPU? */ @@ -944,6 +950,7 @@ struct task_struct { unsigned sched_reset_on_fork:1; unsigned sched_contributes_to_load:1; unsigned sched_migrated:1; + unsigned sched_task_hot:1; /* Force alignment to the next boundary: */ unsigned :0; @@ -1374,6 +1381,15 @@ struct task_struct { * with respect to preemption. */ unsigned long rseq_event_mask; +# ifdef CONFIG_DEBUG_RSEQ + /* + * This is a place holder to save a copy of the rseq fields for + * validation of read-only fields. The struct rseq has a + * variable-length array at the end, so it cannot be used + * directly. Reserve a size large enough for the known fields. 
+ */ + char rseq_fields[sizeof(struct rseq)]; +# endif #endif #ifdef CONFIG_SCHED_MM_CID @@ -1944,11 +1960,10 @@ static inline void kick_process(struct task_struct *tsk) { } #endif extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); - -static inline void set_task_comm(struct task_struct *tsk, const char *from) -{ - __set_task_comm(tsk, from, false); -} +#define set_task_comm(tsk, from) ({ \ + BUILD_BUG_ON(sizeof(from) != TASK_COMM_LEN); \ + __set_task_comm(tsk, from, false); \ +}) /* * - Why not use task_lock()? diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h index 412cdaba33eb..17e04859b9a4 100644 --- a/include/linux/sched/hotplug.h +++ b/include/linux/sched/hotplug.h @@ -18,10 +18,6 @@ extern int sched_cpu_dying(unsigned int cpu); # define sched_cpu_dying NULL #endif -#ifdef CONFIG_HOTPLUG_CPU -extern void idle_task_exit(void); -#else static inline void idle_task_exit(void) {} -#endif #endif /* _LINUX_SCHED_HOTPLUG_H */ diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index 2b461129d1fa..d8501f4709b5 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -7,16 +7,21 @@ #include <linux/tick.h> enum hk_type { - HK_TYPE_TIMER, - HK_TYPE_RCU, - HK_TYPE_MISC, - HK_TYPE_SCHED, - HK_TYPE_TICK, HK_TYPE_DOMAIN, - HK_TYPE_WQ, HK_TYPE_MANAGED_IRQ, - HK_TYPE_KTHREAD, - HK_TYPE_MAX + HK_TYPE_KERNEL_NOISE, + HK_TYPE_MAX, + + /* + * The following housekeeping types are only set by the nohz_full + * boot commandline option. So they can share the same value. + */ + HK_TYPE_TICK = HK_TYPE_KERNEL_NOISE, + HK_TYPE_TIMER = HK_TYPE_KERNEL_NOISE, + HK_TYPE_RCU = HK_TYPE_KERNEL_NOISE, + HK_TYPE_MISC = HK_TYPE_KERNEL_NOISE, + HK_TYPE_WQ = HK_TYPE_KERNEL_NOISE, + HK_TYPE_KTHREAD = HK_TYPE_KERNEL_NOISE }; #ifdef CONFIG_CPU_ISOLATION diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 4237daa5ac7a..7f3dbafe1817 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -114,7 +114,10 @@ struct sched_domain { unsigned int lb_count[CPU_MAX_IDLE_TYPES]; unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; - unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; + unsigned int lb_imbalance_load[CPU_MAX_IDLE_TYPES]; + unsigned int lb_imbalance_util[CPU_MAX_IDLE_TYPES]; + unsigned int lb_imbalance_task[CPU_MAX_IDLE_TYPES]; + unsigned int lb_imbalance_misfit[CPU_MAX_IDLE_TYPES]; unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; @@ -140,9 +143,7 @@ struct sched_domain { unsigned int ttwu_move_affine; unsigned int ttwu_move_balance; #endif -#ifdef CONFIG_SCHED_DEBUG char *name; -#endif union { void *private; /* used during construction */ struct rcu_head rcu; /* used during destruction */ @@ -198,18 +199,12 @@ struct sched_domain_topology_level { int flags; int numa_level; struct sd_data data; -#ifdef CONFIG_SCHED_DEBUG char *name; -#endif }; extern void __init set_sched_topology(struct sched_domain_topology_level *tl); -#ifdef CONFIG_SCHED_DEBUG # define SD_INIT_NAME(type) .name = #type -#else -# define SD_INIT_NAME(type) -#endif #else /* CONFIG_SMP */ diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h index 06cd8fb2f409..0f28b4623ad4 100644 --- a/include/linux/sched/wake_q.h +++ b/include/linux/sched/wake_q.h @@ -63,4 +63,38 @@ extern void wake_q_add(struct wake_q_head *head, struct task_struct *task); extern void 
wake_q_add_safe(struct wake_q_head *head, struct task_struct *task); extern void wake_up_q(struct wake_q_head *head); +/* Spin unlock helpers to unlock and call wake_up_q with preempt disabled */ +static inline +void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q) +{ + guard(preempt)(); + raw_spin_unlock(lock); + if (wake_q) { + wake_up_q(wake_q); + wake_q_init(wake_q); + } +} + +static inline +void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q) +{ + guard(preempt)(); + raw_spin_unlock_irq(lock); + if (wake_q) { + wake_up_q(wake_q); + wake_q_init(wake_q); + } +} + +static inline +void raw_spin_unlock_irqrestore_wake(raw_spinlock_t *lock, unsigned long flags, + struct wake_q_head *wake_q) +{ + guard(preempt)(); + raw_spin_unlock_irqrestore(lock, flags); + if (wake_q) { + wake_up_q(wake_q); + wake_q_init(wake_q); + } +} #endif /* _LINUX_SCHED_WAKE_Q_H */ diff --git a/include/linux/scmi_imx_protocol.h b/include/linux/scmi_imx_protocol.h index 066216f1357a..53b356a26414 100644 --- a/include/linux/scmi_imx_protocol.h +++ b/include/linux/scmi_imx_protocol.h @@ -13,10 +13,11 @@ #include <linux/notifier.h> #include <linux/types.h> -enum scmi_nxp_protocol { - SCMI_PROTOCOL_IMX_BBM = 0x81, - SCMI_PROTOCOL_IMX_MISC = 0x84, -}; +#define SCMI_PROTOCOL_IMX_BBM 0x81 +#define SCMI_PROTOCOL_IMX_MISC 0x84 + +#define SCMI_IMX_VENDOR "NXP" +#define SCMI_IMX_SUBVENDOR "IMX" struct scmi_imx_bbm_proto_ops { int (*rtc_time_set)(const struct scmi_protocol_handle *ph, u32 id, diff --git a/include/linux/security.h b/include/linux/security.h index cbdba435b798..980b6c207cad 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -226,6 +226,18 @@ extern unsigned long dac_mmap_min_addr; #endif /* + * A "security context" is the text representation of + * the information used by LSMs. + * This structure contains the string, its length, and which LSM + * it is useful for. 
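[Editor's note] The unlock-and-wake helpers above collapse the common "drop the lock, then wake_up_q() with preemption disabled" sequence; a sketch with hypothetical foo fields:

        static void foo_complete(struct foo *f)
        {
                DEFINE_WAKE_Q(wake_q);
                unsigned long flags;

                raw_spin_lock_irqsave(&f->lock, flags);
                f->done = true;
                if (f->waiter) {
                        wake_q_add(&wake_q, f->waiter);
                        f->waiter = NULL;
                }
                /* unlocks, then wakes under the helper's guard(preempt) */
                raw_spin_unlock_irqrestore_wake(&f->lock, flags, &wake_q);
        }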
+ */ +struct lsm_context { + char *context; /* Provided by the module */ + u32 len; + int id; /* Identifies the module */ +}; + +/* * Values used in the task_security_ops calls */ /* setuid or setgid, id0 == uid or gid */ @@ -378,8 +390,8 @@ int security_sb_clone_mnt_opts(const struct super_block *oldsb, int security_move_mount(const struct path *from_path, const struct path *to_path); int security_dentry_init_security(struct dentry *dentry, int mode, const struct qstr *name, - const char **xattr_name, void **ctx, - u32 *ctxlen); + const char **xattr_name, + struct lsm_context *lsmcxt); int security_dentry_create_files_as(struct dentry *dentry, int mode, struct qstr *name, const struct cred *old, @@ -553,14 +565,14 @@ int security_getprocattr(struct task_struct *p, int lsmid, const char *name, int security_setprocattr(int lsmid, const char *name, void *value, size_t size); int security_netlink_send(struct sock *sk, struct sk_buff *skb); int security_ismaclabel(const char *name); -int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); -int security_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata, u32 *seclen); +int security_secid_to_secctx(u32 secid, struct lsm_context *cp); +int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp); int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); -void security_release_secctx(char *secdata, u32 seclen); +void security_release_secctx(struct lsm_context *cp); void security_inode_invalidate_secctx(struct inode *inode); int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); -int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); +int security_inode_getsecctx(struct inode *inode, struct lsm_context *cp); int security_locked_down(enum lockdown_reason what); int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len, void *val, size_t val_len, u64 id, u64 flags); @@ -852,8 +864,7 @@ static inline int security_dentry_init_security(struct dentry *dentry, int mode, const struct qstr *name, const char **xattr_name, - void **ctx, - u32 *ctxlen) + struct lsm_context *lsmcxt) { return -EOPNOTSUPP; } @@ -1526,14 +1537,13 @@ static inline int security_ismaclabel(const char *name) return 0; } -static inline int security_secid_to_secctx(u32 secid, char **secdata, - u32 *seclen) +static inline int security_secid_to_secctx(u32 secid, struct lsm_context *cp) { return -EOPNOTSUPP; } static inline int security_lsmprop_to_secctx(struct lsm_prop *prop, - char **secdata, u32 *seclen) + struct lsm_context *cp) { return -EOPNOTSUPP; } @@ -1545,7 +1555,7 @@ static inline int security_secctx_to_secid(const char *secdata, return -EOPNOTSUPP; } -static inline void security_release_secctx(char *secdata, u32 seclen) +static inline void security_release_secctx(struct lsm_context *cp) { } @@ -1561,7 +1571,8 @@ static inline int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 { return -EOPNOTSUPP; } -static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) +static inline int security_inode_getsecctx(struct inode *inode, + struct lsm_context *cp) { return -EOPNOTSUPP; } diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 5298765d6ca4..5ce48eab7a2a 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -272,7 +272,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex) ({ \ unsigned __seq; \ \ - while ((__seq = 
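[Editor's note] Callers of the reworked secctx interfaces now pass a struct lsm_context and release through the same object; a hedged sketch, assuming the converted calls keep returning a negative errno on failure:

        struct lsm_context ctx = { };
        int rc;

        rc = security_secid_to_secctx(secid, &ctx);
        if (rc < 0)
                return rc;

        pr_info("label: %.*s\n", (int)ctx.len, ctx.context);

        /* ctx.id records which LSM allocated ctx.context, so release via ctx */
        security_release_secctx(&ctx);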
seqprop_sequence(s)) & 1) \ + while (unlikely((__seq = seqprop_sequence(s)) & 1)) \ cpu_relax(); \ \ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \ @@ -319,6 +319,29 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex) }) /** + * raw_seqcount_try_begin() - begin a seqcount_t read critical section + * w/o lockdep and w/o counter stabilization + * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants + * @start: count to be passed to read_seqcount_retry() + * + * Similar to raw_seqcount_begin(), except it enables eliding the critical + * section entirely if odd, instead of doing the speculation knowing it will + * fail. + * + * Useful when counter stabilization is more or less equivalent to taking + * the lock and there is a slowpath that does that. + * + * If true, start will be set to the (even) sequence count read. + * + * Return: true when a read critical section is started. + */ +#define raw_seqcount_try_begin(s, start) \ +({ \ + start = raw_read_seqcount(s); \ + !(start & 1); \ +}) + +/** * raw_seqcount_begin() - begin a seqcount_t read critical section w/o * lockdep and w/o counter stabilization * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h index 926496c9cc9c..bf178238a308 100644 --- a/include/linux/skb_array.h +++ b/include/linux/skb_array.h @@ -199,17 +199,18 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp) return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb); } -static inline int skb_array_resize_multiple_noprof(struct skb_array **rings, - int nrings, unsigned int size, - gfp_t gfp) +static inline int skb_array_resize_multiple_bh_noprof(struct skb_array **rings, + int nrings, + unsigned int size, + gfp_t gfp) { BUILD_BUG_ON(offsetof(struct skb_array, ring)); - return ptr_ring_resize_multiple_noprof((struct ptr_ring **)rings, - nrings, size, gfp, - __skb_array_destroy_skb); + return ptr_ring_resize_multiple_bh_noprof((struct ptr_ring **)rings, + nrings, size, gfp, + __skb_array_destroy_skb); } -#define skb_array_resize_multiple(...) \ - alloc_hooks(skb_array_resize_multiple_noprof(__VA_ARGS__)) +#define skb_array_resize_multiple_bh(...) \ + alloc_hooks(skb_array_resize_multiple_bh_noprof(__VA_ARGS__)) static inline void skb_array_cleanup(struct skb_array *a) { diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 58009fa66102..bb2b751d274a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -608,11 +608,19 @@ struct skb_shared_info { * Warning : all fields before dataref are cleared in __alloc_skb() */ atomic_t dataref; - unsigned int xdp_frags_size; - /* Intermediate layers must ensure that destructor_arg - * remains valid until skb destructor */ - void * destructor_arg; + union { + struct { + u32 xdp_frags_size; + u32 xdp_frags_truesize; + }; + + /* + * Intermediate layers must ensure that destructor_arg + * remains valid until skb destructor. + */ + void *destructor_arg; + }; /* must be last field, see pskb_expand_head() */ skb_frag_t frags[MAX_SKB_FRAGS]; @@ -1134,7 +1142,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb) * skb_dst - returns skb dst_entry * @skb: buffer * - * Returns skb dst_entry, regardless of reference taken or not. + * Returns: skb dst_entry, regardless of reference taken or not. 
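[Editor's note] The intended pattern for raw_seqcount_try_begin() above is a lockless fast path that falls straight through to the locking slow path when the counter is odd; a sketch with hypothetical foo state:

        static u64 foo_read(void)
        {
                unsigned int seq;
                u64 val;

                if (raw_seqcount_try_begin(&foo_seqcount, seq)) {
                        /* speculative read; no spinning on a concurrent writer */
                        val = READ_ONCE(foo_value);
                        if (!read_seqcount_retry(&foo_seqcount, seq))
                                return val;
                }

                /* counter was odd (writer active) or we raced: stabilize via the lock */
                spin_lock(&foo_lock);
                val = foo_value;
                spin_unlock(&foo_lock);
                return val;
        }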
*/ static inline struct dst_entry *skb_dst(const struct sk_buff *skb) { @@ -1222,7 +1230,7 @@ static inline bool skb_wifi_acked_valid(const struct sk_buff *skb) * skb_unref - decrement the skb's reference count * @skb: buffer * - * Returns true if we can free the skb. + * Returns: true if we can free the skb. */ static inline bool skb_unref(struct sk_buff *skb) { @@ -1344,7 +1352,7 @@ struct sk_buff_fclones { * @sk: socket * @skb: buffer * - * Returns true if skb is a fast clone, and its clone is not freed. + * Returns: true if skb is a fast clone, and its clone is not freed. * Some drivers call skb_orphan() in their ndo_start_xmit(), * so we also check that didn't happen. */ @@ -3516,7 +3524,7 @@ static inline struct page *__dev_alloc_page_noprof(gfp_t gfp_mask) * A page shouldn't be considered for reusing/recycling if it was allocated * under memory pressure or at a distant memory node. * - * Returns false if this page should be returned to page allocator, true + * Returns: false if this page should be returned to page allocator, true * otherwise. */ static inline bool dev_page_is_reusable(const struct page *page) @@ -3627,13 +3635,13 @@ static inline netmem_ref skb_frag_netmem(const skb_frag_t *frag) int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb, unsigned int headroom); int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, - struct bpf_prog *prog); + const struct bpf_prog *prog); /** * skb_frag_address - gets the address of the data contained in a paged fragment * @frag: the paged fragment buffer * - * Returns the address of the data within @frag. The page must already + * Returns: the address of the data within @frag. The page must already * be mapped. */ static inline void *skb_frag_address(const skb_frag_t *frag) @@ -3648,7 +3656,7 @@ static inline void *skb_frag_address(const skb_frag_t *frag) * skb_frag_address_safe - gets the address of the data contained in a paged fragment * @frag: the paged fragment buffer * - * Returns the address of the data within @frag. Checks that the page + * Returns: the address of the data within @frag. Checks that the page * is mapped and returns %NULL otherwise. */ static inline void *skb_frag_address_safe(const skb_frag_t *frag) @@ -3674,7 +3682,7 @@ static inline void skb_frag_page_copy(skb_frag_t *fragto, bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio); /** - * skb_frag_dma_map - maps a paged fragment via the DMA API + * __skb_frag_dma_map - maps a paged fragment via the DMA API * @dev: the device to map the fragment to * @frag: the paged fragment to map * @offset: the offset within the fragment (starting at the @@ -3684,15 +3692,36 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio); * * Maps the page associated with @frag to @device. */ -static inline dma_addr_t skb_frag_dma_map(struct device *dev, - const skb_frag_t *frag, - size_t offset, size_t size, - enum dma_data_direction dir) +static inline dma_addr_t __skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) { return dma_map_page(dev, skb_frag_page(frag), skb_frag_off(frag) + offset, size, dir); } +#define skb_frag_dma_map(dev, frag, ...) 
\ + CONCATENATE(_skb_frag_dma_map, \ + COUNT_ARGS(__VA_ARGS__))(dev, frag, ##__VA_ARGS__) + +#define __skb_frag_dma_map1(dev, frag, offset, uf, uo) ({ \ + const skb_frag_t *uf = (frag); \ + size_t uo = (offset); \ + \ + __skb_frag_dma_map(dev, uf, uo, skb_frag_size(uf) - uo, \ + DMA_TO_DEVICE); \ +}) +#define _skb_frag_dma_map1(dev, frag, offset) \ + __skb_frag_dma_map1(dev, frag, offset, __UNIQUE_ID(frag_), \ + __UNIQUE_ID(offset_)) +#define _skb_frag_dma_map0(dev, frag) \ + _skb_frag_dma_map1(dev, frag, 0) +#define _skb_frag_dma_map2(dev, frag, offset, size) \ + __skb_frag_dma_map(dev, frag, offset, size, DMA_TO_DEVICE) +#define _skb_frag_dma_map3(dev, frag, offset, size, dir) \ + __skb_frag_dma_map(dev, frag, offset, size, dir) + static inline struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) { @@ -3890,7 +3919,7 @@ static inline int skb_linearize(struct sk_buff *skb) * skb_has_shared_frag - can any frag be overwritten * @skb: buffer to test * - * Return true if the skb has at least one frag that might be modified + * Return: true if the skb has at least one frag that might be modified * by an external entity (as in vmsplice()/sendfile()) */ static inline bool skb_has_shared_frag(const struct sk_buff *skb) @@ -4612,7 +4641,7 @@ static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb) /* Check if we need to perform checksum complete validation. * - * Returns true if checksum complete is needed, false otherwise + * Returns: true if checksum complete is needed, false otherwise * (either checksum is unnecessary or zero checksum is allowed). */ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, diff --git a/include/linux/slab.h b/include/linux/slab.h index 10a971c2bde3..09eedaecf120 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -1099,5 +1099,6 @@ unsigned int kmem_cache_size(struct kmem_cache *s); size_t kmalloc_size_roundup(size_t size); void __init kmem_cache_init_late(void); +void __init kvfree_rcu_init(void); #endif /* _LINUX_SLAB_H */ diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 5bee6f7fc400..0c3906e8ad19 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -391,14 +391,6 @@ int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa); */ int cmdq_pkt_eoc(struct cmdq_pkt *pkt); -/** - * cmdq_pkt_finalize() - Append EOC and jump command to pkt. 
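[Editor's note] Per the CONCATENATE/COUNT_ARGS dispatch above, skb_frag_dma_map() now accepts two to five arguments, defaulting the offset to 0, the size to the remainder of the fragment, and the direction to DMA_TO_DEVICE:

        dma = skb_frag_dma_map(dev, frag);                              /* whole frag, to device */
        dma = skb_frag_dma_map(dev, frag, off);                         /* from off to frag end */
        dma = skb_frag_dma_map(dev, frag, off, len);                    /* explicit length */
        dma = skb_frag_dma_map(dev, frag, off, len, DMA_FROM_DEVICE);   /* explicit direction */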
- * @pkt: the CMDQ packet - * - * Return: 0 for success; else the error code is returned - */ -int cmdq_pkt_finalize(struct cmdq_pkt *pkt); - #else /* IS_ENABLED(CONFIG_MTK_CMDQ) */ static inline int cmdq_dev_get_client_reg(struct device *dev, @@ -519,11 +511,6 @@ static inline int cmdq_pkt_eoc(struct cmdq_pkt *pkt) return -EINVAL; } -static inline int cmdq_pkt_finalize(struct cmdq_pkt *pkt) -{ - return -EINVAL; -} - #endif /* IS_ENABLED(CONFIG_MTK_CMDQ) */ #endif /* __MTK_CMDQ_H__ */ diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h index 658b10fa5b20..0a5939285583 100644 --- a/include/linux/soundwire/sdw_registers.h +++ b/include/linux/soundwire/sdw_registers.h @@ -4,6 +4,9 @@ #ifndef __SDW_REGISTERS_H #define __SDW_REGISTERS_H +#include <linux/bitfield.h> +#include <linux/bits.h> + /* * SDW registers as defined by MIPI 1.2 Spec */ @@ -329,16 +332,27 @@ * 2:0 Control Number[2:0] */ -#define SDW_SDCA_CTL(fun, ent, ctl, ch) (BIT(30) | \ - (((fun) & 0x7) << 22) | \ - (((ent) & 0x40) << 15) | \ - (((ent) & 0x3f) << 7) | \ - (((ctl) & 0x30) << 15) | \ - (((ctl) & 0x0f) << 3) | \ - (((ch) & 0x38) << 12) | \ - ((ch) & 0x07)) +#define SDW_SDCA_CTL(fun, ent, ctl, ch) (BIT(30) | \ + (((fun) & GENMASK(2, 0)) << 22) | \ + (((ent) & BIT(6)) << 15) | \ + (((ent) & GENMASK(5, 0)) << 7) | \ + (((ctl) & GENMASK(5, 4)) << 15) | \ + (((ctl) & GENMASK(3, 0)) << 3) | \ + (((ch) & GENMASK(5, 3)) << 12) | \ + ((ch) & GENMASK(2, 0))) + +#define SDW_SDCA_CTL_FUNC(reg) FIELD_GET(GENMASK(24, 22), (reg)) +#define SDW_SDCA_CTL_ENT(reg) ((FIELD_GET(BIT(21), (reg)) << 6) | \ + FIELD_GET(GENMASK(12, 7), (reg))) +#define SDW_SDCA_CTL_CSEL(reg) ((FIELD_GET(GENMASK(20, 19), (reg)) << 4) | \ + FIELD_GET(GENMASK(6, 3), (reg))) +#define SDW_SDCA_CTL_CNUM(reg) ((FIELD_GET(GENMASK(17, 15), (reg)) << 3) | \ + FIELD_GET(GENMASK(2, 0), (reg))) #define SDW_SDCA_MBQ_CTL(reg) ((reg) | BIT(13)) #define SDW_SDCA_NEXT_CTL(reg) ((reg) | BIT(14)) +/* Check the reserved and fixed bits in address */ +#define SDW_SDCA_VALID_CTL(reg) (((reg) & (GENMASK(31, 25) | BIT(18) | BIT(13))) == BIT(30)) + #endif /* __SDW_REGISTERS_H */ diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index c46d2b8029be..c4830dfaff3d 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -15,16 +15,32 @@ #define SPI_MEM_OP_CMD(__opcode, __buswidth) \ { \ + .nbytes = 1, \ .buswidth = __buswidth, \ .opcode = __opcode, \ + } + +#define SPI_MEM_DTR_OP_CMD(__opcode, __buswidth) \ + { \ .nbytes = 1, \ + .opcode = __opcode, \ + .buswidth = __buswidth, \ + .dtr = true, \ } #define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \ { \ .nbytes = __nbytes, \ + .buswidth = __buswidth, \ + .val = __val, \ + } + +#define SPI_MEM_DTR_OP_ADDR(__nbytes, __val, __buswidth) \ + { \ + .nbytes = __nbytes, \ .val = __val, \ .buswidth = __buswidth, \ + .dtr = true, \ } #define SPI_MEM_OP_NO_ADDR { } @@ -35,22 +51,47 @@ .buswidth = __buswidth, \ } +#define SPI_MEM_DTR_OP_DUMMY(__nbytes, __buswidth) \ + { \ + .nbytes = __nbytes, \ + .buswidth = __buswidth, \ + .dtr = true, \ + } + #define SPI_MEM_OP_NO_DUMMY { } #define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth) \ { \ + .buswidth = __buswidth, \ + .dir = SPI_MEM_DATA_IN, \ + .nbytes = __nbytes, \ + .buf.in = __buf, \ + } + +#define SPI_MEM_DTR_OP_DATA_IN(__nbytes, __buf, __buswidth) \ + { \ .dir = SPI_MEM_DATA_IN, \ .nbytes = __nbytes, \ .buf.in = __buf, \ .buswidth = __buswidth, \ + .dtr = true, \ } #define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, 
__buswidth) \ { \ + .buswidth = __buswidth, \ + .dir = SPI_MEM_DATA_OUT, \ + .nbytes = __nbytes, \ + .buf.out = __buf, \ + } + +#define SPI_MEM_DTR_OP_DATA_OUT(__nbytes, __buf, __buswidth) \ + { \ .dir = SPI_MEM_DATA_OUT, \ .nbytes = __nbytes, \ .buf.out = __buf, \ .buswidth = __buswidth, \ + .dtr = true, \ } #define SPI_MEM_OP_NO_DATA { } @@ -68,6 +109,9 @@ enum spi_mem_data_dir { SPI_MEM_DATA_OUT, }; +#define SPI_MEM_OP_MAX_FREQ(__freq) \ + .max_freq = __freq + /** * struct spi_mem_op - describes a SPI memory operation * @cmd.nbytes: number of opcode bytes (only 1 or 2 are valid). The opcode is @@ -97,6 +141,9 @@ enum spi_mem_data_dir { * operation does not involve transferring data * @data.buf.in: input buffer (must be DMA-able) * @data.buf.out: output buffer (must be DMA-able) + * @max_freq: frequency limitation wrt this operation. 0 means there is no + * specific constraint and the highest achievable frequency can be + * attempted. */ struct spi_mem_op { struct { @@ -135,14 +182,17 @@ struct spi_mem_op { const void *out; } buf; } data; + + unsigned int max_freq; }; -#define SPI_MEM_OP(__cmd, __addr, __dummy, __data) \ +#define SPI_MEM_OP(__cmd, __addr, __dummy, __data, ...) \ { \ .cmd = __cmd, \ .addr = __addr, \ .dummy = __dummy, \ .data = __data, \ + __VA_ARGS__ \ } /** @@ -302,11 +352,13 @@ struct spi_controller_mem_ops { * @ecc: Supports operations with error correction * @swap16: Supports swapping bytes on a 16 bit boundary when configured in * Octal DTR + * @per_op_freq: Supports per operation frequency switching */ struct spi_controller_mem_caps { bool dtr; bool ecc; bool swap16; + bool per_op_freq; }; #define spi_mem_controller_is_capable(ctlr, cap) \ @@ -371,6 +423,8 @@ bool spi_mem_default_supports_op(struct spi_mem *mem, #endif /* CONFIG_SPI_MEM */ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op); +void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op); +u64 spi_mem_calc_op_duration(struct spi_mem_op *op); bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op); diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 08339eb8a01c..d7ba46e74f58 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -43,6 +43,12 @@ int init_srcu_struct(struct srcu_struct *ssp); #define __SRCU_DEP_MAP_INIT(srcu_name) #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +/* Values for SRCU Tree srcu_data ->srcu_reader_flavor, but also used by rcutorture. */ +#define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock(). +#define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe(). +#define SRCU_READ_FLAVOR_LITE 0x4 // srcu_read_lock_lite(). +#define SRCU_READ_FLAVOR_ALL 0x7 // All of the above. + #ifdef CONFIG_TINY_SRCU #include <linux/srcutiny.h> #elif defined(CONFIG_TREE_SRCU) @@ -232,13 +238,14 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp) * a mutex that is held elsewhere while calling synchronize_srcu() or * synchronize_srcu_expedited(). * - * The return value from srcu_read_lock() must be passed unaltered - * to the matching srcu_read_unlock(). Note that srcu_read_lock() and - * the matching srcu_read_unlock() must occur in the same context, for - * example, it is illegal to invoke srcu_read_unlock() in an irq handler - * if the matching srcu_read_lock() was invoked in process context. Or, - * for that matter to invoke srcu_read_unlock() from one task and the - * matching srcu_read_lock() from another. + * The return value from srcu_read_lock() is guaranteed to be + * non-negative. 
This value must be passed unaltered to the matching + * srcu_read_unlock(). Note that srcu_read_lock() and the matching + * srcu_read_unlock() must occur in the same context, for example, it is + * illegal to invoke srcu_read_unlock() in an irq handler if the matching + * srcu_read_lock() was invoked in process context. Or, for that matter to + * invoke srcu_read_unlock() from one task and the matching srcu_read_lock() + * from another. */ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp) { diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 490aeecc6bb4..b17814c9d1c7 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -26,6 +26,7 @@ struct srcu_data { atomic_long_t srcu_lock_count[2]; /* Locks per CPU. */ atomic_long_t srcu_unlock_count[2]; /* Unlocks per CPU. */ int srcu_reader_flavor; /* Reader flavor for srcu_struct structure? */ + /* Values: SRCU_READ_FLAVOR_.* */ /* Update-side state. */ spinlock_t __private lock ____cacheline_internodealigned_in_smp; @@ -43,11 +44,6 @@ struct srcu_data { struct srcu_struct *ssp; }; -/* Values for ->srcu_reader_flavor. */ -#define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock(). -#define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe(). -#define SRCU_READ_FLAVOR_LITE 0x4 // srcu_read_lock_lite(). - /* * Node in SRCU combining tree, similar in function to rcu_data. */ @@ -258,7 +254,7 @@ static inline void srcu_check_read_flavor_lite(struct srcu_struct *ssp) if (likely(READ_ONCE(sdp->srcu_reader_flavor) & SRCU_READ_FLAVOR_LITE)) return; - // Note that the cmpxchg() in srcu_check_read_flavor() is fully ordered. + // Note that the cmpxchg() in __srcu_check_read_flavor() is fully ordered. __srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE); } diff --git a/include/linux/stat.h b/include/linux/stat.h index 3d900c86981c..9d8382e23a9c 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -52,6 +52,7 @@ struct kstat { u64 mnt_id; u32 dio_mem_align; u32 dio_offset_align; + u32 dio_read_offset_align; u64 change_cookie; u64 subvol; u32 atomic_write_unit_min; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index d79ff252cfdc..c9878a612e53 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -33,7 +33,9 @@ #define STMMAC_CSR_20_35M 0x2 /* MDC = clk_scr_i/16 */ #define STMMAC_CSR_35_60M 0x3 /* MDC = clk_scr_i/26 */ #define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */ -#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */ +#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/124 */ +#define STMMAC_CSR_300_500M 0x6 /* MDC = clk_scr_i/204 */ +#define STMMAC_CSR_500_800M 0x7 /* MDC = clk_scr_i/324 */ /* MTL algorithms identifiers */ #define MTL_TX_ALGORITHM_WRR 0x0 @@ -250,8 +252,8 @@ struct plat_stmmacenet_data { struct clk *stmmac_clk; struct clk *pclk; struct clk *clk_ptp_ref; - unsigned int clk_ptp_rate; - unsigned int clk_ref_rate; + unsigned long clk_ptp_rate; + unsigned long clk_ref_rate; unsigned int mult_fact_100ns; s32 ptp_max_adj; u32 cdc_error_adj; @@ -263,7 +265,7 @@ struct plat_stmmacenet_data { int mac_port_sel_speed; int has_xgmac; u8 vlan_fail_q; - unsigned int eee_usecs_rate; + unsigned long eee_usecs_rate; struct pci_dev *pdev; int int_snapshot_num; int msi_mac_vec; diff --git a/include/linux/string.h b/include/linux/string.h index 493ac4862c77..86d5d352068b 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -4,6 +4,7 @@ #include <linux/args.h> #include <linux/array_size.h> +#include <linux/cleanup.h> /* for DEFINE_FREE() 
*/ #include <linux/compiler.h> /* for inline */ #include <linux/types.h> /* for size_t */ #include <linux/stddef.h> /* for NULL */ @@ -312,6 +313,8 @@ extern void *kmemdup_array(const void *src, size_t count, size_t element_size, g extern char **argv_split(gfp_t gfp, const char *str, int *argcp); extern void argv_free(char **argv); +DEFINE_FREE(argv_free, char **, if (!IS_ERR_OR_NULL(_T)) argv_free(_T)) + /* lib/cmdline.c */ extern int get_option(char **str, int *pint); extern char *get_options(const char *str, int nints, int *ints); diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 35766963dd14..e783132e481f 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -222,6 +222,8 @@ static inline bool cache_is_expired(struct cache_detail *detail, struct cache_he return detail->flush_time >= h->last_refresh; } +extern int cache_check_rcu(struct cache_detail *detail, + struct cache_head *h, struct cache_req *rqstp); extern int cache_check(struct cache_detail *detail, struct cache_head *h, struct cache_req *rqstp); extern void cache_flush(void); diff --git a/include/linux/sunrpc/gss_asn1.h b/include/linux/sunrpc/gss_asn1.h deleted file mode 100644 index 3ccecd0ad229..000000000000 --- a/include/linux/sunrpc/gss_asn1.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * linux/include/linux/sunrpc/gss_asn1.h - * - * minimal asn1 for generic encoding/decoding of gss tokens - * - * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h, - * lib/gssapi/krb5/gssapiP_krb5.h, and others - * - * Copyright (c) 2000 The Regents of the University of Michigan. - * All rights reserved. - * - * Andy Adamson <andros@umich.edu> - */ - -/* - * Copyright 1995 by the Massachusetts Institute of Technology. - * All Rights Reserved. - * - * Export of this software from the United States of America may - * require a specific license from the United States Government. - * It is the responsibility of any person or organization contemplating - * export to obtain such a license before exporting. - * - * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and - * distribute this software and its documentation for any purpose and - * without fee is hereby granted, provided that the above copyright - * notice appear in all copies and that both that copyright notice and - * this permission notice appear in supporting documentation, and that - * the name of M.I.T. not be used in advertising or publicity pertaining - * to distribution of the software without specific, written prior - * permission. Furthermore if you modify this software you must label - * your software as modified software and not distribute it in such a - * fashion that it might be confused with the original M.I.T. software. - * M.I.T. makes no representations about the suitability of - * this software for any purpose. It is provided "as is" without express - * or implied warranty. 
- * - */ - - -#include <linux/sunrpc/gss_api.h> - -#define SIZEOF_INT 4 - -/* from gssapi_err_generic.h */ -#define G_BAD_SERVICE_NAME (-2045022976L) -#define G_BAD_STRING_UID (-2045022975L) -#define G_NOUSER (-2045022974L) -#define G_VALIDATE_FAILED (-2045022973L) -#define G_BUFFER_ALLOC (-2045022972L) -#define G_BAD_MSG_CTX (-2045022971L) -#define G_WRONG_SIZE (-2045022970L) -#define G_BAD_USAGE (-2045022969L) -#define G_UNKNOWN_QOP (-2045022968L) -#define G_NO_HOSTNAME (-2045022967L) -#define G_BAD_HOSTNAME (-2045022966L) -#define G_WRONG_MECH (-2045022965L) -#define G_BAD_TOK_HEADER (-2045022964L) -#define G_BAD_DIRECTION (-2045022963L) -#define G_TOK_TRUNC (-2045022962L) -#define G_REFLECT (-2045022961L) -#define G_WRONG_TOKID (-2045022960L) - -#define g_OID_equal(o1,o2) \ - (((o1)->len == (o2)->len) && \ - (memcmp((o1)->data,(o2)->data,(int) (o1)->len) == 0)) - -u32 g_verify_token_header( - struct xdr_netobj *mech, - int *body_size, - unsigned char **buf_in, - int toksize); - -int g_token_size( - struct xdr_netobj *mech, - unsigned int body_size); - -void g_make_token_header( - struct xdr_netobj *mech, - int body_size, - unsigned char **buf); diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h index 78a80bf3fdcb..43950b5237c8 100644 --- a/include/linux/sunrpc/gss_krb5.h +++ b/include/linux/sunrpc/gss_krb5.h @@ -40,7 +40,6 @@ #include <crypto/skcipher.h> #include <linux/sunrpc/auth_gss.h> #include <linux/sunrpc/gss_err.h> -#include <linux/sunrpc/gss_asn1.h> /* Length of constant used in key derivation */ #define GSS_KRB5_K5CLENGTH (5) diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index e68fecf6eab5..74658cca0f38 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -72,16 +72,12 @@ struct svc_serv { spinlock_t sv_lock; unsigned int sv_nprogs; /* Number of sv_programs */ unsigned int sv_nrthreads; /* # of server threads */ - unsigned int sv_maxconn; /* max connections allowed or - * '0' causing max to be based - * on number of threads. */ - unsigned int sv_max_payload; /* datagram payload size */ unsigned int sv_max_mesg; /* max_payload + 1 page for overheads */ unsigned int sv_xdrsize; /* XDR buffer size */ struct list_head sv_permsocks; /* all permanent sockets */ struct list_head sv_tempsocks; /* all temporary sockets */ - int sv_tmpcnt; /* count of temporary sockets */ + int sv_tmpcnt; /* count of temporary "valid" sockets */ struct timer_list sv_temptimer; /* timer for aging temporary sockets */ char * sv_name; /* service name */ @@ -327,12 +323,7 @@ static inline bool svc_thread_should_stop(struct svc_rqst *rqstp) */ static inline void svc_thread_init_status(struct svc_rqst *rqstp, int err) { - rqstp->rq_err = err; - /* memory barrier ensures assignment to error above is visible before - * waitqueue_active() test below completes. - */ - smp_mb(); - wake_up_var(&rqstp->rq_err); + store_release_wake_up(&rqstp->rq_err, err); if (err) kthread_exit(1); } diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 0981e35a9fed..72be60952579 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -99,8 +99,30 @@ enum { XPT_HANDSHAKE, /* xprt requests a handshake */ XPT_TLS_SESSION, /* transport-layer security established */ XPT_PEER_AUTH, /* peer has been authenticated */ + XPT_PEER_VALID, /* peer has presented a filehandle that + * it has access to. It is NOT counted + * in ->sv_tmpcnt. 
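+ * Connections that have not been marked
+ * valid this way are the ones capped by
+ * XPT_MAX_TMP_CONN below.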
+ */ }; +/* + * Maximum number of "tmp" connections - those without XPT_PEER_VALID - + * permitted on any service. + */ +#define XPT_MAX_TMP_CONN 64 + +static inline void svc_xprt_set_valid(struct svc_xprt *xpt) +{ + if (test_bit(XPT_TEMP, &xpt->xpt_flags) && + !test_and_set_bit(XPT_PEER_VALID, &xpt->xpt_flags)) { + struct svc_serv *serv = xpt->xpt_server; + + spin_lock(&serv->sv_lock); + serv->sv_tmpcnt -= 1; + spin_unlock(&serv->sv_lock); + } +} + static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) { spin_lock(&xpt->xpt_lock); diff --git a/include/linux/sunrpc/xdrgen/nfs4_1.h b/include/linux/sunrpc/xdrgen/nfs4_1.h new file mode 100644 index 000000000000..cf21a14aa885 --- /dev/null +++ b/include/linux/sunrpc/xdrgen/nfs4_1.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Generated by xdrgen. Manual edits will be lost. */ +/* XDR specification file: ../../Documentation/sunrpc/xdr/nfs4_1.x */ +/* XDR specification modification time: Mon Oct 14 09:10:13 2024 */ + +#ifndef _LINUX_XDRGEN_NFS4_1_DEF_H +#define _LINUX_XDRGEN_NFS4_1_DEF_H + +#include <linux/types.h> +#include <linux/sunrpc/xdrgen/_defs.h> + +typedef s64 int64_t; + +typedef u32 uint32_t; + +typedef struct { + u32 count; + uint32_t *element; +} bitmap4; + +struct nfstime4 { + int64_t seconds; + uint32_t nseconds; +}; + +typedef bool fattr4_offline; + +enum { FATTR4_OFFLINE = 83 }; + +struct open_arguments4 { + bitmap4 oa_share_access; + bitmap4 oa_share_deny; + bitmap4 oa_share_access_want; + bitmap4 oa_open_claim; + bitmap4 oa_create_mode; +}; + +enum open_args_share_access4 { + OPEN_ARGS_SHARE_ACCESS_READ = 1, + OPEN_ARGS_SHARE_ACCESS_WRITE = 2, + OPEN_ARGS_SHARE_ACCESS_BOTH = 3, +}; +typedef enum open_args_share_access4 open_args_share_access4; + +enum open_args_share_deny4 { + OPEN_ARGS_SHARE_DENY_NONE = 0, + OPEN_ARGS_SHARE_DENY_READ = 1, + OPEN_ARGS_SHARE_DENY_WRITE = 2, + OPEN_ARGS_SHARE_DENY_BOTH = 3, +}; +typedef enum open_args_share_deny4 open_args_share_deny4; + +enum open_args_share_access_want4 { + OPEN_ARGS_SHARE_ACCESS_WANT_ANY_DELEG = 3, + OPEN_ARGS_SHARE_ACCESS_WANT_NO_DELEG = 4, + OPEN_ARGS_SHARE_ACCESS_WANT_CANCEL = 5, + OPEN_ARGS_SHARE_ACCESS_WANT_SIGNAL_DELEG_WHEN_RESRC_AVAIL = 17, + OPEN_ARGS_SHARE_ACCESS_WANT_PUSH_DELEG_WHEN_UNCONTENDED = 18, + OPEN_ARGS_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS = 20, + OPEN_ARGS_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION = 21, +}; +typedef enum open_args_share_access_want4 open_args_share_access_want4; + +enum open_args_open_claim4 { + OPEN_ARGS_OPEN_CLAIM_NULL = 0, + OPEN_ARGS_OPEN_CLAIM_PREVIOUS = 1, + OPEN_ARGS_OPEN_CLAIM_DELEGATE_CUR = 2, + OPEN_ARGS_OPEN_CLAIM_DELEGATE_PREV = 3, + OPEN_ARGS_OPEN_CLAIM_FH = 4, + OPEN_ARGS_OPEN_CLAIM_DELEG_CUR_FH = 5, + OPEN_ARGS_OPEN_CLAIM_DELEG_PREV_FH = 6, +}; +typedef enum open_args_open_claim4 open_args_open_claim4; + +enum open_args_createmode4 { + OPEN_ARGS_CREATEMODE_UNCHECKED4 = 0, + OPEN_ARGS_CREATE_MODE_GUARDED = 1, + OPEN_ARGS_CREATEMODE_EXCLUSIVE4 = 2, + OPEN_ARGS_CREATE_MODE_EXCLUSIVE4_1 = 3, +}; +typedef enum open_args_createmode4 open_args_createmode4; + +typedef struct open_arguments4 fattr4_open_arguments; + +enum { FATTR4_OPEN_ARGUMENTS = 86 }; + +enum { OPEN4_RESULT_NO_OPEN_STATEID = 0x00000010 }; + +typedef struct nfstime4 fattr4_time_deleg_access; + +typedef struct nfstime4 fattr4_time_deleg_modify; + +enum { FATTR4_TIME_DELEG_ACCESS = 84 }; + +enum { FATTR4_TIME_DELEG_MODIFY = 85 }; + +enum { OPEN4_SHARE_ACCESS_WANT_DELEG_MASK = 0xFF00 }; + +enum { 
OPEN4_SHARE_ACCESS_WANT_NO_PREFERENCE = 0x0000 }; + +enum { OPEN4_SHARE_ACCESS_WANT_READ_DELEG = 0x0100 }; + +enum { OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG = 0x0200 }; + +enum { OPEN4_SHARE_ACCESS_WANT_ANY_DELEG = 0x0300 }; + +enum { OPEN4_SHARE_ACCESS_WANT_NO_DELEG = 0x0400 }; + +enum { OPEN4_SHARE_ACCESS_WANT_CANCEL = 0x0500 }; + +enum { OPEN4_SHARE_ACCESS_WANT_SIGNAL_DELEG_WHEN_RESRC_AVAIL = 0x10000 }; + +enum { OPEN4_SHARE_ACCESS_WANT_PUSH_DELEG_WHEN_UNCONTENDED = 0x20000 }; + +enum { OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS = 0x100000 }; + +enum { OPEN4_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION = 0x200000 }; + +enum open_delegation_type4 { + OPEN_DELEGATE_NONE = 0, + OPEN_DELEGATE_READ = 1, + OPEN_DELEGATE_WRITE = 2, + OPEN_DELEGATE_NONE_EXT = 3, + OPEN_DELEGATE_READ_ATTRS_DELEG = 4, + OPEN_DELEGATE_WRITE_ATTRS_DELEG = 5, +}; +typedef enum open_delegation_type4 open_delegation_type4; + +#define NFS4_int64_t_sz \ + (XDR_hyper) +#define NFS4_uint32_t_sz \ + (XDR_unsigned_int) +#define NFS4_bitmap4_sz (XDR_unsigned_int) +#define NFS4_nfstime4_sz \ + (NFS4_int64_t_sz + NFS4_uint32_t_sz) +#define NFS4_fattr4_offline_sz \ + (XDR_bool) +#define NFS4_open_arguments4_sz \ + (NFS4_bitmap4_sz + NFS4_bitmap4_sz + NFS4_bitmap4_sz + NFS4_bitmap4_sz + NFS4_bitmap4_sz) +#define NFS4_open_args_share_access4_sz (XDR_int) +#define NFS4_open_args_share_deny4_sz (XDR_int) +#define NFS4_open_args_share_access_want4_sz (XDR_int) +#define NFS4_open_args_open_claim4_sz (XDR_int) +#define NFS4_open_args_createmode4_sz (XDR_int) +#define NFS4_fattr4_open_arguments_sz \ + (NFS4_open_arguments4_sz) +#define NFS4_fattr4_time_deleg_access_sz \ + (NFS4_nfstime4_sz) +#define NFS4_fattr4_time_deleg_modify_sz \ + (NFS4_nfstime4_sz) +#define NFS4_open_delegation_type4_sz (XDR_int) + +#endif /* _LINUX_XDRGEN_NFS4_1_DEF_H */ diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h index c0514c684b2c..e411368cdacf 100644 --- a/include/linux/sunrpc/xprtmultipath.h +++ b/include/linux/sunrpc/xprtmultipath.h @@ -75,7 +75,6 @@ extern struct rpc_xprt_switch *xprt_iter_xchg_switch( struct rpc_xprt_switch *newswitch); extern struct rpc_xprt *xprt_iter_xprt(struct rpc_xprt_iter *xpi); -extern struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi); extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi); extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps, diff --git a/include/linux/swap.h b/include/linux/swap.h index f3e0ac20c2e8..a5f475335aea 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -219,7 +219,6 @@ enum { SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */ SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */ /* add others here before... */ - SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */ }; #define SWAP_CLUSTER_MAX 32UL @@ -257,18 +256,27 @@ struct swap_cluster_info { u8 order; struct list_head list; }; -#define CLUSTER_FLAG_FREE 1 /* This cluster is free */ -#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */ -#define CLUSTER_FLAG_FRAG 4 /* This cluster is on nonfull list */ -#define CLUSTER_FLAG_FULL 8 /* This cluster is on full list */ + +/* All on-list clusters must have a non-zero flag.
 */ +enum swap_cluster_flags { + CLUSTER_FLAG_NONE = 0, /* For temporary off-list cluster */ + CLUSTER_FLAG_FREE, + CLUSTER_FLAG_NONFULL, + CLUSTER_FLAG_FRAG, + /* Clusters with flags above are allocatable */ + CLUSTER_FLAG_USABLE = CLUSTER_FLAG_FRAG, + CLUSTER_FLAG_FULL, + CLUSTER_FLAG_DISCARD, + CLUSTER_FLAG_MAX, +}; /* * The first page in the swap file is the swap header, which is always marked * bad to prevent it from being allocated as an entry. This also prevents the * cluster to which it belongs being marked free. Therefore 0 is safe to use as - * a sentinel to indicate next is not valid in percpu_cluster. + * a sentinel to indicate an entry is not valid. */ -#define SWAP_NEXT_INVALID 0 +#define SWAP_ENTRY_INVALID 0 #ifdef CONFIG_THP_SWAP #define SWAP_NR_ORDERS (PMD_ORDER + 1) @@ -282,6 +290,7 @@ struct swap_cluster_info { * throughput. */ struct percpu_cluster { + local_lock_t lock; /* Protect the percpu_cluster above */ unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */ }; @@ -304,15 +313,12 @@ struct swap_info_struct { /* list of clusters that contain at least one free slot */ struct list_head frag_clusters[SWAP_NR_ORDERS]; /* list of clusters that are fragmented or contended */ - unsigned int frag_cluster_nr[SWAP_NR_ORDERS]; - unsigned int lowest_bit; /* index of first free in swap_map */ - unsigned int highest_bit; /* index of last free in swap_map */ + atomic_long_t frag_cluster_nr[SWAP_NR_ORDERS]; unsigned int pages; /* total of usable pages of swap */ - unsigned int inuse_pages; /* number of those currently in use */ - unsigned int cluster_next; /* likely index for next allocation */ - unsigned int cluster_nr; /* countdown to next cluster search */ - unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */ + atomic_long_t inuse_pages; /* number of those currently in use */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ + struct percpu_cluster *global_cluster; /* Use one global cluster for rotating devices */ + spinlock_t global_cluster_lock; /* Serialize usage of global cluster */ struct rb_root swap_extent_root;/* root of the swap extent rbtree */ struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h index ae73a87775b3..b5ec038069da 100644 --- a/include/linux/swap_cgroup.h +++ b/include/linux/swap_cgroup.h @@ -6,10 +6,8 @@ #if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP) -extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, - unsigned short old, unsigned short new); -extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, - unsigned int nr_ents); +extern void swap_cgroup_record(struct folio *folio, swp_entry_t ent); +extern unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents); extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); extern int swap_cgroup_swapon(int type, unsigned long max_pages); extern void swap_cgroup_swapoff(int type); @@ -17,8 +15,12 @@ extern void swap_cgroup_swapoff(int type); #else static inline -unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, - unsigned int nr_ents) +void swap_cgroup_record(struct folio *folio, swp_entry_t ent) +{ +} + +static inline +unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents) { return 0; } diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h index 15adfb8c813a..840aec3523b2 100644 --- a/include/linux/swap_slots.h +++
b/include/linux/swap_slots.h @@ -16,15 +16,12 @@ struct swap_slots_cache { swp_entry_t *slots; int nr; int cur; - spinlock_t free_lock; /* protects slots_ret, n_ret */ - swp_entry_t *slots_ret; int n_ret; }; void disable_swap_slots_cache_lock(void); void reenable_swap_slots_cache_unlock(void); void enable_swap_slots_cache(void); -void free_swap_slot(swp_entry_t entry); extern bool swap_slot_cache_enabled; diff --git a/include/linux/task_work.h b/include/linux/task_work.h index 2964171856e0..0646804860ff 100644 --- a/include/linux/task_work.h +++ b/include/linux/task_work.h @@ -19,9 +19,6 @@ enum task_work_notify_mode { TWA_SIGNAL, TWA_SIGNAL_NO_IPI, TWA_NMI_CURRENT, - - TWA_FLAGS = 0xff00, - TWAF_NO_ALLOC = 0x0100, }; static inline bool task_work_pending(struct task_struct *task) diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 754802478b96..69f9bedd0ee8 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -295,6 +295,10 @@ static inline struct thermal_zone_device *thermal_tripless_zone_device_register( static inline void thermal_zone_device_unregister(struct thermal_zone_device *tz) { } +static inline void thermal_zone_device_update(struct thermal_zone_device *tz, + enum thermal_notify_event event) +{ } + static inline struct thermal_cooling_device * thermal_cooling_device_register(const char *type, void *devdata, const struct thermal_cooling_device_ops *ops) diff --git a/include/linux/time64.h b/include/linux/time64.h index f1bcea8c124a..9934331c7b86 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -49,6 +49,11 @@ static inline int timespec64_equal(const struct timespec64 *a, return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); } +static inline bool timespec64_is_epoch(const struct timespec64 *ts) +{ + return ts->tv_sec == 0 && ts->tv_nsec == 0; +} + /* * lhs < rhs: return <0 * lhs == rhs: return 0 diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 0e035f675efe..542773650200 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -264,18 +264,6 @@ extern bool timekeeping_rtc_skipresume(void); extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta); /** - * struct ktime_timestamps - Simultaneous mono/boot/real timestamps - * @mono: Monotonic timestamp - * @boot: Boottime timestamp - * @real: Realtime timestamp - */ -struct ktime_timestamps { - u64 mono; - u64 boot; - u64 real; -}; - -/** * struct system_time_snapshot - simultaneous raw/real time capture with * counter value * @cycles: Clocksource counter value to produce the system times @@ -345,9 +333,6 @@ extern int get_device_system_crosststamp( */ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot); -/* NMI safe mono/boot/realtime timestamps */ -extern void ktime_get_fast_timestamps(struct ktime_timestamps *snap); - /* * Persistent clock related interfaces */ diff --git a/include/linux/torture.h b/include/linux/torture.h index c2e979f82f8d..0134e7221cae 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -130,7 +130,7 @@ void _torture_stop_kthread(char *m, struct task_struct **tp); #endif #if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) || IS_ENABLED(CONFIG_LOCK_TORTURE_TEST) || IS_MODULE(CONFIG_LOCK_TORTURE_TEST) -long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask); +long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask, bool dowarn); #endif #endif /* __LINUX_TORTURE_H */ diff --git 
a/include/linux/trace_events.h b/include/linux/trace_events.h index 58ad4ead33fc..5caea596fef0 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -673,6 +673,20 @@ struct trace_event_file { atomic_t tm_ref; /* trigger-mode reference counter */ }; +#ifdef CONFIG_HIST_TRIGGERS +extern struct irq_work hist_poll_work; +extern wait_queue_head_t hist_poll_wq; + +static inline void hist_poll_wakeup(void) +{ + if (wq_has_sleeper(&hist_poll_wq)) + irq_work_queue(&hist_poll_work); +} + +#define hist_poll_wait(file, wait) \ + poll_wait(file, &hist_poll_wq, wait) +#endif + #define __TRACE_EVENT_FLAGS(name, value) \ static int __init trace_init_flags_##name(void) \ { \ diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 76d9055b2cff..a351763e6965 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -218,7 +218,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #define __DEFINE_RUST_DO_TRACE(name, proto, args) \ notrace void rust_do_trace_##name(proto) \ { \ - __rust_do_trace_##name(args); \ + __do_trace_##name(args); \ } /* @@ -268,7 +268,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \ - static inline void __rust_do_trace_##name(proto) \ + static inline void __do_trace_##name(proto) \ { \ if (cond) { \ guard(preempt_notrace)(); \ @@ -277,12 +277,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) } \ static inline void trace_##name(proto) \ { \ - if (static_branch_unlikely(&__tracepoint_##name.key)) { \ - if (cond) { \ - guard(preempt_notrace)(); \ - __DO_TRACE_CALL(name, TP_ARGS(args)); \ - } \ - } \ + if (static_branch_unlikely(&__tracepoint_##name.key)) \ + __do_trace_##name(args); \ if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ WARN_ONCE(!rcu_is_watching(), \ "RCU not watching for tracepoint"); \ @@ -291,7 +287,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto) \ __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \ - static inline void __rust_do_trace_##name(proto) \ + static inline void __do_trace_##name(proto) \ { \ guard(rcu_tasks_trace)(); \ __DO_TRACE_CALL(name, TP_ARGS(args)); \ @@ -299,10 +295,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) static inline void trace_##name(proto) \ { \ might_fault(); \ - if (static_branch_unlikely(&__tracepoint_##name.key)) { \ - guard(rcu_tasks_trace)(); \ - __DO_TRACE_CALL(name, TP_ARGS(args)); \ - } \ + if (static_branch_unlikely(&__tracepoint_##name.key)) \ + __do_trace_##name(args); \ if (IS_ENABLED(CONFIG_LOCKDEP)) { \ WARN_ONCE(!rcu_is_watching(), \ "RCU not watching for tracepoint"); \ diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index dd4b31ce6d5d..d4cdc089f6c3 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -320,7 +320,7 @@ struct serial_struct; * * @poll_init: ``int ()(struct tty_driver *driver, int line, char *options)`` * - * kgdboc support (Documentation/dev-tools/kgdb.rst). This routine is + * kgdboc support (Documentation/process/debugging/kgdb.rst). This routine is * called to initialize the HW for later use by calling @poll_get_char or * @poll_put_char. 
* diff --git a/include/linux/turris-omnia-mcu-interface.h b/include/linux/turris-omnia-mcu-interface.h index 2da8cbeb158a..38b45ab00053 100644 --- a/include/linux/turris-omnia-mcu-interface.h +++ b/include/linux/turris-omnia-mcu-interface.h @@ -9,7 +9,10 @@ #define __TURRIS_OMNIA_MCU_INTERFACE_H #include <linux/bitfield.h> -#include <linux/bits.h> +#include <linux/bitops.h> +#include <linux/types.h> +#include <linux/unaligned.h> +#include <asm/byteorder.h> enum omnia_commands_e { OMNIA_CMD_GET_STATUS_WORD = 0x01, /* slave sends status word back */ @@ -236,6 +239,20 @@ enum omnia_int_e { OMNIA_INT_LAN5_LED1 = BIT(31), }; +enum omnia_cmd_led_mode_e { + OMNIA_CMD_LED_MODE_LED_MASK = GENMASK(3, 0), + OMNIA_CMD_LED_MODE_USER = BIT(4), +}; + +#define OMNIA_CMD_LED_MODE_LED(_l) FIELD_PREP(OMNIA_CMD_LED_MODE_LED_MASK, _l) + +enum omnia_cmd_led_state_e { + OMNIA_CMD_LED_STATE_LED_MASK = GENMASK(3, 0), + OMNIA_CMD_LED_STATE_ON = BIT(4), +}; + +#define OMNIA_CMD_LED_STATE_LED(_l) FIELD_PREP(OMNIA_CMD_LED_STATE_LED_MASK, _l) + enum omnia_cmd_poweroff_e { OMNIA_CMD_POWER_OFF_POWERON_BUTTON = BIT(0), OMNIA_CMD_POWER_OFF_MAGIC = 0xdead, @@ -246,4 +263,135 @@ enum omnia_cmd_usb_ovc_prot_e { OMNIA_CMD_xET_USB_OVC_PROT_ENABLE = BIT(4), }; +/* Command execution functions */ + +struct i2c_client; + +int omnia_cmd_write_read(const struct i2c_client *client, + void *cmd, unsigned int cmd_len, + void *reply, unsigned int reply_len); + +static inline int omnia_cmd_write(const struct i2c_client *client, void *cmd, + unsigned int len) +{ + return omnia_cmd_write_read(client, cmd, len, NULL, 0); +} + +static inline int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd, + u8 val) +{ + u8 buf[2] = { cmd, val }; + + return omnia_cmd_write(client, buf, sizeof(buf)); +} + +static inline int omnia_cmd_write_u16(const struct i2c_client *client, u8 cmd, + u16 val) +{ + u8 buf[3]; + + buf[0] = cmd; + put_unaligned_le16(val, &buf[1]); + + return omnia_cmd_write(client, buf, sizeof(buf)); +} + +static inline int omnia_cmd_write_u32(const struct i2c_client *client, u8 cmd, + u32 val) +{ + u8 buf[5]; + + buf[0] = cmd; + put_unaligned_le32(val, &buf[1]); + + return omnia_cmd_write(client, buf, sizeof(buf)); +} + +static inline int omnia_cmd_read(const struct i2c_client *client, u8 cmd, + void *reply, unsigned int len) +{ + return omnia_cmd_write_read(client, &cmd, 1, reply, len); +} + +static inline unsigned int +omnia_compute_reply_length(unsigned long mask, bool interleaved, + unsigned int offset) +{ + if (!mask) + return 0; + + return ((__fls(mask) >> 3) << interleaved) + 1 + offset; +} + +/* Returns 0 on success */ +static inline int omnia_cmd_read_bits(const struct i2c_client *client, u8 cmd, + unsigned long bits, unsigned long *dst) +{ + __le32 reply; + int err; + + if (!bits) { + *dst = 0; + return 0; + } + + err = omnia_cmd_read(client, cmd, &reply, + omnia_compute_reply_length(bits, false, 0)); + if (err) + return err; + + *dst = le32_to_cpu(reply) & bits; + + return 0; +} + +static inline int omnia_cmd_read_bit(const struct i2c_client *client, u8 cmd, + unsigned long bit) +{ + unsigned long reply; + int err; + + err = omnia_cmd_read_bits(client, cmd, bit, &reply); + if (err) + return err; + + return !!reply; +} + +static inline int omnia_cmd_read_u32(const struct i2c_client *client, u8 cmd, + u32 *dst) +{ + __le32 reply; + int err; + + err = omnia_cmd_read(client, cmd, &reply, sizeof(reply)); + if (err) + return err; + + *dst = le32_to_cpu(reply); + + return 0; +} + +static inline int omnia_cmd_read_u16(const 
struct i2c_client *client, u8 cmd, + u16 *dst) +{ + __le16 reply; + int err; + + err = omnia_cmd_read(client, cmd, &reply, sizeof(reply)); + if (err) + return err; + + *dst = le16_to_cpu(reply); + + return 0; +} + +static inline int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd, + u8 *reply) +{ + return omnia_cmd_read(client, cmd, reply, sizeof(*reply)); +} + #endif /* __TURRIS_OMNIA_MCU_INTERFACE_H */ diff --git a/include/linux/types.h b/include/linux/types.h index 2d7b9ae8714c..1c509ce8f7f6 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -43,7 +43,7 @@ typedef unsigned long uintptr_t; typedef long intptr_t; #ifdef CONFIG_HAVE_UID16 -/* This is defined by include/asm-{arch}/posix_types.h */ +/* This is defined by arch/{arch}/include/asm/posix_types.h */ typedef __kernel_old_uid_t old_uid_t; typedef __kernel_old_gid_t old_gid_t; #endif /* CONFIG_UID16 */ diff --git a/include/linux/uio.h b/include/linux/uio.h index 853f9de5aa05..8ada84e85447 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -82,6 +82,15 @@ struct iov_iter { }; }; +typedef __u16 uio_meta_flags_t; + +struct uio_meta { + uio_meta_flags_t flags; + u16 app_tag; + u64 seed; + struct iov_iter iter; +}; + static inline const struct iovec *iter_iov(const struct iov_iter *iter) { if (iter->iter_type == ITER_UBUF) diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index e0a4c2082245..b1df7d792fa1 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -16,6 +16,7 @@ #include <linux/types.h> #include <linux/wait.h> #include <linux/timer.h> +#include <linux/seqlock.h> struct uprobe; struct vm_area_struct; @@ -124,6 +125,10 @@ struct uprobe_task { unsigned int depth; struct return_instance *return_instances; + struct return_instance *ri_pool; + struct timer_list ri_timer; + seqcount_t ri_seqcount; + union { struct { struct arch_uprobe_task autask; @@ -137,7 +142,6 @@ struct uprobe_task { }; struct uprobe *active_uprobe; - struct timer_list ri_timer; unsigned long xol_vaddr; struct arch_uprobe *auprobe; @@ -154,12 +158,18 @@ struct return_instance { unsigned long stack; /* stack pointer */ unsigned long orig_ret_vaddr; /* original return address */ bool chained; /* true, if instance is nested */ - int consumers_cnt; + int cons_cnt; /* total number of session consumers */ struct return_instance *next; /* keep as stack */ struct rcu_head rcu; - struct return_consumer consumers[] __counted_by(consumers_cnt); + /* singular pre-allocated return_consumer instance for common case */ + struct return_consumer consumer; + /* + * extra return_consumer instances for rare cases of multiple session consumers, + * contains (cons_cnt - 1) elements + */ + struct return_consumer *extra_consumers; } ____cacheline_aligned; enum rp_check { diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h index d50098fb16b5..3068c3084eb6 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -33,7 +33,9 @@ enum pd_ctrl_msg_type { PD_CTRL_FR_SWAP = 19, PD_CTRL_GET_PPS_STATUS = 20, PD_CTRL_GET_COUNTRY_CODES = 21, - /* 22-31 Reserved */ + /* 22-23 Reserved */ + PD_CTRL_GET_REVISION = 24, + /* 25-31 Reserved */ }; enum pd_data_msg_type { @@ -46,7 +48,9 @@ enum pd_data_msg_type { PD_DATA_ALERT = 6, PD_DATA_GET_COUNTRY_INFO = 7, PD_DATA_ENTER_USB = 8, - /* 9-14 Reserved */ + /* 9-11 Reserved */ + PD_DATA_REVISION = 12, + /* 13-14 Reserved */ PD_DATA_VENDOR_DEF = 15, /* 16-31 Reserved */ }; @@ -453,6 +457,20 @@ static inline unsigned int rdo_max_power(u32 rdo) #define EUDO_TBT_SUPPORT BIT(14) 
#define EUDO_HOST_PRESENT BIT(13) +/* + * Request Message Data Object (PD Revision 3.1+ only) + * -------- + * <31:28> :: Revision Major + * <27:24> :: Revision Minor + * <23:20> :: Version Major + * <19:16> :: Version Minor + * <15:0> :: Reserved, Shall be set to zero + */ + +#define RMDO(rev_maj, rev_min, ver_maj, ver_min) \ + (((rev_maj) & 0xf) << 28 | ((rev_min) & 0xf) << 24 | \ + ((ver_maj) & 0xf) << 20 | ((ver_min) & 0xf) << 16) + /* USB PD timers and counters */ #define PD_T_NO_RESPONSE 5000 /* 4.5 - 5.5 seconds */ #define PD_T_DB_DETECT 10000 /* 10 - 15 seconds */ diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index e4de6bc1f69b..0fa9885a1038 100644 --- a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h @@ -223,7 +223,6 @@ extern struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev, extern struct usb_phy *devm_usb_get_phy_by_node(struct device *dev, struct device_node *node, struct notifier_block *nb); extern void usb_put_phy(struct usb_phy *); -extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x); extern void usb_phy_set_event(struct usb_phy *x, unsigned long event); extern void usb_phy_set_charger_current(struct usb_phy *usb_phy, unsigned int mA); @@ -259,10 +258,6 @@ static inline void usb_put_phy(struct usb_phy *x) { } -static inline void devm_usb_put_phy(struct device *dev, struct usb_phy *x) -{ -} - static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event) { } diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h index 8539956bc2be..51be3bb8fccb 100644 --- a/include/linux/usb/storage.h +++ b/include/linux/usb/storage.h @@ -82,4 +82,12 @@ struct bulk_cs_wrap { #define US_BULK_RESET_REQUEST 0xff #define US_BULK_GET_MAX_LUN 0xfe +/* + * If 4 LUNs are supported then the LUNs would be + * numbered from 0 to 3, and the return value for + * US_BULK_GET_MAX_LUN request would be 3. The valid + * LUN field is 4 bits wide, so the upper limit is 0x0f. + */ +#define US_BULK_MAX_LUN_LIMIT 0x0f + #endif diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index 061da9546a81..b22e659f81ba 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -163,7 +163,8 @@ struct tcpc_dev { void (*frs_sourcing_vbus)(struct tcpc_dev *dev); int (*enable_auto_vbus_discharge)(struct tcpc_dev *dev, bool enable); int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode, - bool pps_active, u32 requested_vbus_voltage); + bool pps_active, u32 requested_vbus_voltage, + u32 pps_apdo_min_voltage); bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev); void (*set_partner_usb_comm_capable)(struct tcpc_dev *dev, bool enable); void (*check_contaminant)(struct tcpc_dev *dev); diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index d616b8807000..252af3f77039 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -140,6 +140,7 @@ int typec_cable_set_identity(struct typec_cable *cable); * @mode: Index of the Mode * @vdo: VDO returned by Discover Modes USB PD command * @roles: Only for ports. DRP if the mode is available in both roles + * @inactive: Only for ports. Make this port inactive (default is active). * * Description of an Alternate Mode which a connector, cable plug or partner * supports.
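As an illustration of the new @inactive flag, here is a minimal sketch of how a port driver might describe an alternate mode that starts out inactive; typec_port_register_altmode() is the existing registration helper, and the SVID and mode index are made-up example values:

	#include <linux/usb/typec.h>

	static struct typec_altmode *example_register_mode(struct typec_port *port)
	{
		struct typec_altmode_desc desc = {
			.svid = 0x1234,		/* hypothetical SVID */
			.mode = 1,		/* hypothetical mode index */
			.roles = TYPEC_PORT_DRP,
			.inactive = true,	/* register the mode as inactive */
		};

		return typec_port_register_altmode(port, &desc);
	}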
@@ -150,6 +151,7 @@ struct typec_altmode_desc { u32 vdo; /* Only used with ports */ enum typec_port_data roles; + bool inactive; }; void typec_partner_set_pd_revision(struct typec_partner *partner, u16 pd_revision); diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h index fa97d7e00f5c..55dcea12082c 100644 --- a/include/linux/usb/typec_tbt.h +++ b/include/linux/usb/typec_tbt.h @@ -44,6 +44,7 @@ struct typec_thunderbolt_data { #define TBT_GEN3_NON_ROUNDED 0 #define TBT_GEN3_GEN4_ROUNDED_NON_ROUNDED 1 +#define TBT_CABLE_ROUNDED BIT(19) #define TBT_CABLE_OPTICAL BIT(21) #define TBT_CABLE_RETIMER BIT(22) #define TBT_CABLE_LINK_TRAINING BIT(23) diff --git a/include/linux/verification.h b/include/linux/verification.h index cb2d47f28091..4f3022d081c3 100644 --- a/include/linux/verification.h +++ b/include/linux/verification.h @@ -38,8 +38,6 @@ enum key_being_used_for { VERIFYING_UNSPECIFIED_SIGNATURE, NR__KEY_BEING_USED_FOR }; -extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR]; - #ifdef CONFIG_SYSTEM_DATA_VERIFICATION struct key; diff --git a/include/linux/virtio.h b/include/linux/virtio.h index dd88682e27e3..4d16c13d0df5 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -190,6 +190,8 @@ int virtio_device_freeze(struct virtio_device *dev); int virtio_device_restore(struct virtio_device *dev); #endif void virtio_reset_device(struct virtio_device *dev); +int virtio_device_reset_prepare(struct virtio_device *dev); +int virtio_device_reset_done(struct virtio_device *dev); size_t virtio_max_dma_size(const struct virtio_device *vdev); @@ -214,6 +216,10 @@ size_t virtio_max_dma_size(const struct virtio_device *vdev); * changes; may be called in interrupt context. * @freeze: optional function to call during suspend/hibernation. * @restore: optional function to call on resume. + * @reset_prepare: optional function to call when a transport specific reset + * occurs. + * @reset_done: optional function to call after transport specific reset + * operation has finished. */ struct virtio_driver { struct device_driver driver; @@ -229,6 +235,8 @@ struct virtio_driver { void (*config_changed)(struct virtio_device *dev); int (*freeze)(struct virtio_device *dev); int (*restore)(struct virtio_device *dev); + int (*reset_prepare)(struct virtio_device *dev); + int (*reset_done)(struct virtio_device *dev); }; #define drv_to_virtio(__drv) container_of_const(__drv, struct virtio_driver, driver) diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index 6fb663b36f72..60c9eacd2cf3 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -431,11 +431,11 @@ enum { ((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2])) /* - * The VMCI IOCTLs. We use identity code 7, as noted in ioctl-number.h, and - * we start at sequence 9f. This gives us the same values that our shipping - * products use, starting at 1951, provided we leave out the direction and - * structure size. Note that VMMon occupies the block following us, starting - * at 2001. + * The VMCI IOCTLs. We use identity code 7, as noted in ioctl-number.rst, + * and we start at sequence 9f. This gives us the same values that our + * shipping products use, starting at 1951, provided we leave out the + * direction and structure size. Note that VMMon occupies the block + * following us, starting at 2001. 
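+ * For example, IOCTL_VMCI_VERSION below is _IO(7, 0x9f), which encodes
+ * to (7 << 8) | 0x9f = 0x079f = 1951 once direction and size are left
+ * out.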
 */ #define IOCTL_VMCI_VERSION _IO(7, 0x9f) /* 1951 */ #define IOCTL_VMCI_INIT_CONTEXT _IO(7, 0xa0) @@ -453,9 +453,7 @@ enum { #define IOCTL_VMCI_CTX_GET_CPT_STATE _IO(7, 0xb1) #define IOCTL_VMCI_CTX_SET_CPT_STATE _IO(7, 0xb2) #define IOCTL_VMCI_GET_CONTEXT_ID _IO(7, 0xb3) -#define IOCTL_VMCI_SOCKETS_VERSION _IO(7, 0xb4) -#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE _IO(7, 0xb8) -#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9) +/*IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)*/ #define IOCTL_VMCI_SET_NOTIFY _IO(7, 0xcb) /* 1995 */ /*IOCTL_VMMON_START _IO(7, 0xd1)*/ /* 2001 */ diff --git a/include/linux/wwan.h b/include/linux/wwan.h index 79c781875c09..a4d6cc0c9f68 100644 --- a/include/linux/wwan.h +++ b/include/linux/wwan.h @@ -97,7 +97,7 @@ struct wwan_port_caps { * * This function must be balanced with a call to wwan_remove_port(). * - * Returns a valid pointer to wwan_port on success or PTR_ERR on failure + * Returns: a valid pointer to wwan_port on success or PTR_ERR on failure */ struct wwan_port *wwan_create_port(struct device *parent, enum wwan_port_type type, diff --git a/include/media/cec.h b/include/media/cec.h index 16b412b3131b..0c8e86115b6f 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -10,7 +10,6 @@ #include <linux/poll.h> #include <linux/fs.h> -#include <linux/debugfs.h> #include <linux/device.h> #include <linux/cdev.h> #include <linux/kthread.h> diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h index 5bce6e423e94..e7f019f68c8d 100644 --- a/include/media/v4l2-mediabus.h +++ b/include/media/v4l2-mediabus.h @@ -74,6 +74,24 @@ #define V4L2_MBUS_CSI2_MAX_DATA_LANES 8 /** + * enum v4l2_mbus_csi2_cphy_line_orders_type - CSI-2 C-PHY line order + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC: C-PHY line order ABC (default) + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ACB: C-PHY line order ACB + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BAC: C-PHY line order BAC + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BCA: C-PHY line order BCA + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CAB: C-PHY line order CAB + * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CBA: C-PHY line order CBA + */ +enum v4l2_mbus_csi2_cphy_line_orders_type { + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ACB, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BAC, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BCA, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CAB, + V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CBA, +}; + +/** * struct v4l2_mbus_config_mipi_csi2 - MIPI CSI-2 data bus configuration * @flags: media bus (V4L2_MBUS_*) flags * @data_lanes: an array of physical data lane indexes @@ -81,6 +99,8 @@ * @num_data_lanes: number of data lanes * @lane_polarities: polarity of the lanes. The order is the same as * that of the physical lanes. + * @line_orders: line order of the data lanes. The order is the same as that + * of the physical lanes.
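+ * For example, a transmitter whose first trio is wired B-A-C would
+ * set line_orders[0] = V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BAC.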
*/ struct v4l2_mbus_config_mipi_csi2 { unsigned int flags; @@ -88,6 +108,7 @@ struct v4l2_mbus_config_mipi_csi2 { unsigned char clock_lane; unsigned char num_data_lanes; bool lane_polarities[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES]; + enum v4l2_mbus_csi2_cphy_line_orders_type line_orders[V4L2_MBUS_CSI2_MAX_DATA_LANES]; }; /** diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 363dd63babe7..9e5e95988b9e 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -88,6 +88,23 @@ struct ifa6_config { u16 scope; }; +enum addr_type_t { + UNICAST_ADDR, + MULTICAST_ADDR, + ANYCAST_ADDR, +}; + +struct inet6_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; + enum addr_type_t type; + bool force_rt_scope_universe; +}; + int addrconf_init(void); void addrconf_cleanup(void); @@ -330,6 +347,11 @@ static inline struct inet6_dev *__in6_dev_get(const struct net_device *dev) return rcu_dereference_rtnl(dev->ip6_ptr); } +static inline struct inet6_dev *__in6_dev_get_rtnl_net(const struct net_device *dev) +{ + return rtnl_net_dereference(dev_net(dev), dev->ip6_ptr); +} + /** * __in6_dev_stats_get - get inet6_dev pointer for stats * @dev: network device @@ -525,4 +547,11 @@ int if6_proc_init(void); void if6_proc_exit(void); #endif +int inet6_fill_ifmcaddr(struct sk_buff *skb, + const struct ifmcaddr6 *ifmca, + struct inet6_fill_args *args); + +int inet6_fill_ifacaddr(struct sk_buff *skb, + const struct ifacaddr6 *ifaca, + struct inet6_fill_args *args); #endif diff --git a/include/net/ax25.h b/include/net/ax25.h index cb622d84cd0c..4ee141aae0a2 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -231,6 +231,7 @@ typedef struct ax25_dev { #endif refcount_t refcount; bool device_up; + struct rcu_head rcu; } ax25_dev; typedef struct ax25_cb { @@ -290,9 +291,8 @@ static inline void ax25_dev_hold(ax25_dev *ax25_dev) static inline void ax25_dev_put(ax25_dev *ax25_dev) { - if (refcount_dec_and_test(&ax25_dev->refcount)) { - kfree(ax25_dev); - } + if (refcount_dec_and_test(&ax25_dev->refcount)) + kfree_rcu(ax25_dev, rcu); } static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev) { @@ -335,9 +335,9 @@ void ax25_digi_invert(const ax25_digi *, ax25_digi *); extern spinlock_t ax25_dev_lock; #if IS_ENABLED(CONFIG_AX25) -static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev) +static inline ax25_dev *ax25_dev_ax25dev(const struct net_device *dev) { - return dev->ax25_ptr; + return rcu_dereference_rtnl(dev->ax25_ptr); } #endif diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 6203bd8663b7..0d51970d809f 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -438,7 +438,6 @@ enum { HCI_FORCE_BREDR_SMP, HCI_FORCE_STATIC_ADDR, HCI_LL_RPA_RESOLUTION, - HCI_ENABLE_LL_PRIVACY, HCI_CMD_PENDING, HCI_FORCE_NO_MITM, HCI_QUALITY_REPORT, diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index ca22ead85dbe..f756fac95488 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -157,8 +157,9 @@ struct bdaddr_list_with_irk { /* Bitmask of connection flags */ enum hci_conn_flags { - HCI_CONN_FLAG_REMOTE_WAKEUP = 1, - HCI_CONN_FLAG_DEVICE_PRIVACY = 2, + HCI_CONN_FLAG_REMOTE_WAKEUP = BIT(0), + HCI_CONN_FLAG_DEVICE_PRIVACY = BIT(1), + HCI_CONN_FLAG_ADDRESS_RESOLUTION = BIT(2), }; typedef u8 hci_conn_flags_t; @@ -632,7 +633,6 @@ struct hci_dev { int (*post_init)(struct hci_dev *hdev); int (*set_diag)(struct hci_dev *hdev, 
bool enable); int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr); - void (*cmd_timeout)(struct hci_dev *hdev); void (*reset)(struct hci_dev *hdev); bool (*wakeup)(struct hci_dev *hdev); int (*set_quality_report)(struct hci_dev *hdev, bool enable); @@ -1759,8 +1759,6 @@ int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, u8 type); -int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, - u8 type); void hci_bdaddr_list_clear(struct list_head *list); struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, @@ -1919,11 +1917,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY) -/* Use LL Privacy based address resolution if supported */ -#define use_ll_privacy(dev) (ll_privacy_capable(dev) && \ - hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY)) - -#define privacy_mode_capable(dev) (use_ll_privacy(dev) && \ +#define privacy_mode_capable(dev) (ll_privacy_capable(dev) && \ (hdev->commands[39] & 0x04)) #define read_key_size_capable(dev) \ diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h index f3052cb252ef..7e2cf0cca939 100644 --- a/include/net/bluetooth/hci_sync.h +++ b/include/net/bluetooth/hci_sync.h @@ -140,7 +140,6 @@ int hci_update_scan(struct hci_dev *hdev); int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul); int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance, struct sock *sk); -int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance); struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, bool ext, struct sock *sk); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 27acf1292a5c..363d7dd2255a 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3023,6 +3023,10 @@ static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id) * * @bss: The BSS to authenticate with, the callee must obtain a reference * to it if it needs to keep it. + * @supported_selectors: List of selectors that should be assumed to be + * supported by the station. + * SAE_H2E must be assumed supported if set to %NULL. + * @supported_selectors_len: Length of supported_selectors in octets. * @auth_type: Authentication type (algorithm) * @ie: Extra IEs to add to Authentication frame or %NULL * @ie_len: Length of ie buffer in octets @@ -3045,6 +3049,8 @@ struct cfg80211_auth_request { struct cfg80211_bss *bss; const u8 *ie; size_t ie_len; + const u8 *supported_selectors; + u8 supported_selectors_len; enum nl80211_auth_type auth_type; const u8 *key; u8 key_len; @@ -3124,6 +3130,10 @@ enum cfg80211_assoc_req_flags { * included in the Current AP address field of the Reassociation Request * frame. * @flags: See &enum cfg80211_assoc_req_flags + * @supported_selectors: supported selectors in IEEE 802.11 format + * (or %NULL for no change). + * If %NULL, then support for SAE_H2E should be assumed. + * @supported_selectors_len: Length of supported_selectors in octets. * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask * will be used in ht_capa. Un-supported values will be ignored. * @ht_capa_mask: The bits of ht_capa which are to be used. 
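To make the new association fields concrete, here is a minimal caller-side sketch that declares the selector list explicitly instead of leaving it %NULL; the value 123 is assumed to be the IEEE 802.11 BSS membership selector for SAE hash-to-element, and example_fill_assoc() is a made-up helper:

	#include <net/cfg80211.h>

	static const u8 example_selectors[] = {
		123,	/* assumed: SAE H2E BSS membership selector */
	};

	static void example_fill_assoc(struct cfg80211_assoc_request *req)
	{
		/* An explicit list overrides the %NULL default, which only
		 * implies SAE_H2E support.
		 */
		req->supported_selectors = example_selectors;
		req->supported_selectors_len = sizeof(example_selectors);
	}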
@@ -3150,6 +3160,8 @@ struct cfg80211_assoc_request { struct cfg80211_crypto_settings crypto; bool use_mfp; u32 flags; + const u8 *supported_selectors; + u8 supported_selectors_len; struct ieee80211_ht_cap ht_capa; struct ieee80211_ht_cap ht_capa_mask; struct ieee80211_vht_cap vht_capa, vht_capa_mask; @@ -4582,8 +4594,18 @@ struct mgmt_frame_regs { * * @set_hw_timestamp: Enable/disable HW timestamping of TM/FTM frames. * @set_ttlm: set the TID to link mapping. + * @set_epcs: Enable/Disable EPCS for station mode. * @get_radio_mask: get bitmask of radios in use. * (invoked with the wiphy mutex held) + * @assoc_ml_reconf: Request a non-AP MLO connection to perform ML + * reconfiguration, i.e., add and/or remove links to/from the + * association using ML reconfiguration action frames. Successfully added + * links will be added to the set of valid links. Successfully removed + * links will be removed from the set of valid links. The driver must + * indicate removed links by calling cfg80211_links_removed() and added + * links by calling cfg80211_mlo_reconf_add_done(). When calling + * cfg80211_mlo_reconf_add_done() the bss pointer must be given for each + * link for which MLO reconfiguration 'add' operation was requested. */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -4733,7 +4755,7 @@ struct cfg80211_ops { int (*set_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_tx_power_setting type, int mbm); int (*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev, - int *dbm); + unsigned int link_id, int *dbm); void (*rfkill_poll)(struct wiphy *wiphy); @@ -4947,6 +4969,11 @@ struct cfg80211_ops { int (*set_ttlm)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ttlm_params *params); u32 (*get_radio_mask)(struct wiphy *wiphy, struct net_device *dev); + int (*assoc_ml_reconf)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_assoc_link *add_links, + u16 rem_links); + int (*set_epcs)(struct wiphy *wiphy, struct net_device *dev, + bool val); }; /* @@ -5957,7 +5984,7 @@ int wiphy_register(struct wiphy *wiphy); * @wiphy: the wiphy to check the locking on * @p: The pointer to read, prior to dereferencing * - * Return the value of the specified RCU-protected pointer, but omit the + * Return: the value of the specified RCU-protected pointer, but omit the * READ_ONCE(), because caller holds the wiphy mutex used for updates. */ #define wiphy_dereference(wiphy, p) \ @@ -6031,6 +6058,10 @@ static inline void wiphy_unlock(struct wiphy *wiphy) mutex_unlock(&wiphy->mtx); } +DEFINE_GUARD(wiphy, struct wiphy *, + mutex_lock(&_T->mtx), + mutex_unlock(&_T->mtx)) + struct wiphy_work; typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *); @@ -9701,6 +9732,39 @@ static inline int cfg80211_color_change_notify(struct net_device *dev, void cfg80211_links_removed(struct net_device *dev, u16 link_mask); /** + * struct cfg80211_mlo_reconf_done_data - MLO reconfiguration data + * @buf: MLO Reconfiguration Response frame (header + body) + * @len: length of the frame data + * @added_links: BIT mask of links successfully added to the association + * @links: per-link information indexed by link ID + * @links.bss: the BSS that MLO reconfiguration was requested for, ownership of + * the pointer moves to cfg80211 in the call to + * cfg80211_mlo_reconf_add_done(). + * + * The BSS pointer must be set for each link for which 'add' operation was + * requested in the assoc_ml_reconf callback. 
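The DEFINE_GUARD(wiphy, ...) added above makes the wiphy mutex usable with the scope-based helpers from linux/cleanup.h. A minimal sketch, assuming a hypothetical my_do_work_locked() that requires the lock:

#include <linux/cleanup.h>

static int my_do_work_locked(struct wiphy *wiphy);	/* hypothetical */

static int my_locked_op(struct wiphy *wiphy)
{
	/* takes &wiphy->mtx here and releases it on every return path */
	guard(wiphy)(wiphy);

	return my_do_work_locked(wiphy);
}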
+ */ +struct cfg80211_mlo_reconf_done_data { + const u8 *buf; + size_t len; + u16 added_links; + struct { + struct cfg80211_bss *bss; + } links[IEEE80211_MLD_MAX_NUM_LINKS]; +}; + +/** + * cfg80211_mlo_reconf_add_done - Notify about MLO reconfiguration result + * @dev: network device. + * @data: MLO reconfiguration done data, &struct cfg80211_mlo_reconf_done_data + * + * Inform cfg80211 and the userspace that processing of ML reconfiguration + * request to add links to the association is done. + */ +void cfg80211_mlo_reconf_add_done(struct net_device *dev, + struct cfg80211_mlo_reconf_done_data *data); + +/** * cfg80211_schedule_channels_check - schedule regulatory check if needed * @wdev: the wireless device to check * @@ -9710,6 +9774,13 @@ void cfg80211_links_removed(struct net_device *dev, u16 link_mask); */ void cfg80211_schedule_channels_check(struct wireless_dev *wdev); +/** + * cfg80211_epcs_changed - Notify about a change in EPCS state + * @netdev: the wireless device whose EPCS state changed + * @enabled: set to true if EPCS was enabled, otherwise set to false. + */ +void cfg80211_epcs_changed(struct net_device *netdev, bool enabled); + #ifdef CONFIG_CFG80211_DEBUGFS /** * wiphy_locked_debugfs_read - do a locked read in debugfs diff --git a/include/net/devlink.h b/include/net/devlink.h index fbb9a2668e24..b8783126c1ed 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -35,7 +35,7 @@ struct devlink_port_phys_attrs { /** * struct devlink_port_pci_pf_attrs - devlink port's PCI PF attributes * @controller: Associated controller number - * @pf: Associated PCI PF number for this port. + * @pf: associated PCI function number for the devlink port instance * @external: when set, indicates if a port is for an external controller */ struct devlink_port_pci_pf_attrs { @@ -47,8 +47,9 @@ struct devlink_port_pci_pf_attrs { /** * struct devlink_port_pci_vf_attrs - devlink port's PCI VF attributes * @controller: Associated controller number - * @pf: Associated PCI PF number for this port. - * @vf: Associated PCI VF for of the PCI PF for this port. + * @pf: associated PCI function number for the devlink port instance + * @vf: associated PCI VF number of a PF for the devlink port instance; + * VF number starts from 0 for the first PCI virtual function * @external: when set, indicates if a port is for an external controller */ struct devlink_port_pci_vf_attrs { @@ -61,8 +62,8 @@ struct devlink_port_pci_vf_attrs { /** * struct devlink_port_pci_sf_attrs - devlink port's PCI SF attributes * @controller: Associated controller number - * @sf: Associated PCI SF for of the PCI PF for this port. - * @pf: Associated PCI PF number for this port. 
+ * @sf: associated SF number of a PF for the devlink port instance + * @pf: associated PCI function number for the devlink port instance * @external: when set, indicates if a port is for an external controller */ struct devlink_port_pci_sf_attrs { @@ -1261,6 +1262,18 @@ enum devlink_trap_group_generic_id { .min_burst = _min_burst, \ } +#define devlink_fmsg_put(fmsg, name, value) ( \ + _Generic((value), \ + bool : devlink_fmsg_bool_pair_put, \ + u8 : devlink_fmsg_u8_pair_put, \ + u16 : devlink_fmsg_u32_pair_put, \ + u32 : devlink_fmsg_u32_pair_put, \ + u64 : devlink_fmsg_u64_pair_put, \ + int : devlink_fmsg_u32_pair_put, \ + char * : devlink_fmsg_string_pair_put, \ + const char * : devlink_fmsg_string_pair_put) \ + (fmsg, name, (value))) + enum { /* device supports reload operations */ DEVLINK_F_RELOAD = 1UL << 0, @@ -1522,6 +1535,7 @@ int devl_trylock(struct devlink *devlink); void devl_unlock(struct devlink *devlink); void devl_assert_locked(struct devlink *devlink); bool devl_lock_is_held(struct devlink *devlink); +DEFINE_GUARD(devl, struct devlink *, devl_lock(_T), devl_unlock(_T)); struct ib_device; @@ -1994,6 +2008,7 @@ int devlink_compat_switch_id_get(struct net_device *dev, int devlink_nl_port_handle_fill(struct sk_buff *msg, struct devlink_port *devlink_port); size_t devlink_nl_port_handle_size(struct devlink_port *devlink_port); +void devlink_fmsg_dump_skb(struct devlink_fmsg *fmsg, const struct sk_buff *skb); #else diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h index 6c5a1ea209a2..32a34dfe8cc5 100644 --- a/include/net/dropreason-core.h +++ b/include/net/dropreason-core.h @@ -6,9 +6,13 @@ #define DEFINE_DROP_REASON(FN, FNe) \ FN(NOT_SPECIFIED) \ FN(NO_SOCKET) \ + FN(SOCKET_CLOSE) \ + FN(SOCKET_FILTER) \ + FN(SOCKET_RCVBUFF) \ + FN(UNIX_DISCONNECT) \ + FN(UNIX_SKIP_OOB) \ FN(PKT_TOO_SMALL) \ FN(TCP_CSUM) \ - FN(SOCKET_FILTER) \ FN(UDP_CSUM) \ FN(NETFILTER_DROP) \ FN(OTHERHOST) \ @@ -18,7 +22,6 @@ FN(UNICAST_IN_L2_MULTICAST) \ FN(XFRM_POLICY) \ FN(IP_NOPROTO) \ - FN(SOCKET_RCVBUFF) \ FN(PROTO_MEM) \ FN(TCP_AUTH_HDR) \ FN(TCP_MD5NOTFOUND) \ @@ -36,6 +39,7 @@ FN(TCP_OVERWINDOW) \ FN(TCP_OFOMERGE) \ FN(TCP_RFC7323_PAWS) \ + FN(TCP_RFC7323_PAWS_ACK) \ FN(TCP_OLD_SEQUENCE) \ FN(TCP_INVALID_SEQUENCE) \ FN(TCP_INVALID_ACK_SEQUENCE) \ @@ -58,6 +62,12 @@ FN(TC_EGRESS) \ FN(SECURITY_HOOK) \ FN(QDISC_DROP) \ + FN(QDISC_OVERLIMIT) \ + FN(QDISC_CONGESTED) \ + FN(CAKE_FLOOD) \ + FN(FQ_BAND_LIMIT) \ + FN(FQ_HORIZON_LIMIT) \ + FN(FQ_FLOW_LIMIT) \ FN(CPU_BACKLOG) \ FN(XDP) \ FN(TC_INGRESS) \ @@ -100,11 +110,13 @@ FN(VXLAN_VNI_NOT_FOUND) \ FN(MAC_INVALID_SOURCE) \ FN(VXLAN_ENTRY_EXISTS) \ - FN(VXLAN_NO_REMOTE) \ + FN(NO_TX_TARGET) \ FN(IP_TUNNEL_ECN) \ FN(TUNNEL_TXINFO) \ FN(LOCAL_MAC) \ FN(ARP_PVLAN_DISABLE) \ + FN(MAC_IEEE_MAC_CONTROL) \ + FN(BRIDGE_INGRESS_STP_STATE) \ FNe(MAX) /** @@ -129,12 +141,27 @@ enum skb_drop_reason { * 3) no valid child socket during 3WHS process */ SKB_DROP_REASON_NO_SOCKET, + /** @SKB_DROP_REASON_SOCKET_CLOSE: socket is close()d */ + SKB_DROP_REASON_SOCKET_CLOSE, + /** @SKB_DROP_REASON_SOCKET_FILTER: dropped by socket filter */ + SKB_DROP_REASON_SOCKET_FILTER, + /** @SKB_DROP_REASON_SOCKET_RCVBUFF: socket receive buff is full */ + SKB_DROP_REASON_SOCKET_RCVBUFF, + /** + * @SKB_DROP_REASON_UNIX_DISCONNECT: recv queue is purged when SOCK_DGRAM + * or SOCK_SEQPACKET socket re-connect()s to another socket or notices + * during send() that the peer has been close()d. 
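The devlink_fmsg_put() macro above picks the typed pair-put helper via _Generic, so callers no longer spell out the value type. A hedged sketch of a health reporter dump callback using it; the reporter and field names are made up:

static int my_reporter_dump(struct devlink_health_reporter *reporter,
			    struct devlink_fmsg *fmsg, void *priv_ctx,
			    struct netlink_ext_ack *extack)
{
	u32 qid = 3;		/* illustrative values */
	bool stalled = true;

	devlink_fmsg_put(fmsg, "queue_id", qid);	  /* u32 pair put */
	devlink_fmsg_put(fmsg, "stalled", stalled);	  /* bool pair put */
	devlink_fmsg_put(fmsg, "state", "reset-pending"); /* string pair put */
	return 0;
}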
+ */ + SKB_DROP_REASON_UNIX_DISCONNECT, + /** + * @SKB_DROP_REASON_UNIX_SKIP_OOB: Out-Of-Band data is skipped by + * recv() without MSG_OOB so dropped. + */ + SKB_DROP_REASON_UNIX_SKIP_OOB, /** @SKB_DROP_REASON_PKT_TOO_SMALL: packet size is too small */ SKB_DROP_REASON_PKT_TOO_SMALL, /** @SKB_DROP_REASON_TCP_CSUM: TCP checksum error */ SKB_DROP_REASON_TCP_CSUM, - /** @SKB_DROP_REASON_SOCKET_FILTER: dropped by socket filter */ - SKB_DROP_REASON_SOCKET_FILTER, /** @SKB_DROP_REASON_UDP_CSUM: UDP checksum error */ SKB_DROP_REASON_UDP_CSUM, /** @SKB_DROP_REASON_NETFILTER_DROP: dropped by netfilter */ @@ -165,8 +192,6 @@ enum skb_drop_reason { SKB_DROP_REASON_XFRM_POLICY, /** @SKB_DROP_REASON_IP_NOPROTO: no support for IP protocol */ SKB_DROP_REASON_IP_NOPROTO, - /** @SKB_DROP_REASON_SOCKET_RCVBUFF: socket receive buff is full */ - SKB_DROP_REASON_SOCKET_RCVBUFF, /** * @SKB_DROP_REASON_PROTO_MEM: proto memory limitation, such as * udp packet drop out of udp_memory_allocated. @@ -251,6 +276,11 @@ enum skb_drop_reason { * LINUX_MIB_PAWSESTABREJECTED, LINUX_MIB_PAWSACTIVEREJECTED */ SKB_DROP_REASON_TCP_RFC7323_PAWS, + /** + * @SKB_DROP_REASON_TCP_RFC7323_PAWS_ACK: PAWS check, old ACK packet. + * Corresponds to LINUX_MIB_PAWS_OLD_ACK. + */ + SKB_DROP_REASON_TCP_RFC7323_PAWS_ACK, /** @SKB_DROP_REASON_TCP_OLD_SEQUENCE: Old SEQ field (duplicate packet) */ SKB_DROP_REASON_TCP_OLD_SEQUENCE, /** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */ @@ -312,6 +342,36 @@ enum skb_drop_reason { */ SKB_DROP_REASON_QDISC_DROP, /** + * @SKB_DROP_REASON_QDISC_OVERLIMIT: dropped by qdisc when a qdisc + * instance exceeds its total buffer size limit. + */ + SKB_DROP_REASON_QDISC_OVERLIMIT, + /** + * @SKB_DROP_REASON_QDISC_CONGESTED: dropped by a qdisc AQM algorithm + * due to congestion. + */ + SKB_DROP_REASON_QDISC_CONGESTED, + /** + * @SKB_DROP_REASON_CAKE_FLOOD: dropped by the flood protection part of + * CAKE qdisc AQM algorithm (BLUE). + */ + SKB_DROP_REASON_CAKE_FLOOD, + /** + * @SKB_DROP_REASON_FQ_BAND_LIMIT: dropped by fq qdisc when per band + * limit is reached. + */ + SKB_DROP_REASON_FQ_BAND_LIMIT, + /** + * @SKB_DROP_REASON_FQ_HORIZON_LIMIT: dropped by fq qdisc when packet + * timestamp is too far in the future. + */ + SKB_DROP_REASON_FQ_HORIZON_LIMIT, + /** + * @SKB_DROP_REASON_FQ_FLOW_LIMIT: dropped by fq qdisc when a flow + * exceeds its limits. + */ + SKB_DROP_REASON_FQ_FLOW_LIMIT, + /** * @SKB_DROP_REASON_CPU_BACKLOG: failed to enqueue the skb to the per CPU * backlog queue. This can be caused by backlog queue full (see * netdev_max_backlog in net.rst) or RPS flow limit @@ -461,8 +521,8 @@ enum skb_drop_reason { * entry or an entry pointing to a nexthop. */ SKB_DROP_REASON_VXLAN_ENTRY_EXISTS, - /** @SKB_DROP_REASON_VXLAN_NO_REMOTE: no remote found for xmit */ - SKB_DROP_REASON_VXLAN_NO_REMOTE, + /** @SKB_DROP_REASON_NO_TX_TARGET: no target found for xmit */ + SKB_DROP_REASON_NO_TX_TARGET, /** * @SKB_DROP_REASON_IP_TUNNEL_ECN: skb is dropped according to * RFC 6040 4.2, see __INET_ECN_decapsulate() for detail. @@ -485,6 +545,16 @@ enum skb_drop_reason { */ SKB_DROP_REASON_ARP_PVLAN_DISABLE, /** + * @SKB_DROP_REASON_MAC_IEEE_MAC_CONTROL: the destination MAC address + * is an IEEE MAC Control address. + */ + SKB_DROP_REASON_MAC_IEEE_MAC_CONTROL, + /** + * @SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE: the STP state of the + * ingress bridge port does not allow frames to be forwarded. 
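The qdisc-related reasons above split what used to be a blanket SKB_DROP_REASON_QDISC_DROP. An illustrative drop site, with a hypothetical queue structure standing in for real qdisc state:

#include <linux/skbuff.h>

struct my_queue {			/* hypothetical qdisc-private queue */
	struct sk_buff_head list;
	unsigned int backlog;
	unsigned int limit;
};

static void my_enqueue(struct my_queue *q, struct sk_buff *skb)
{
	if (q->backlog >= q->limit) {
		/* attribute the loss precisely rather than QDISC_DROP */
		kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_OVERLIMIT);
		return;
	}
	__skb_queue_tail(&q->list, skb);
	q->backlog++;
}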
+ */ + SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE, + /** * @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which * shouldn't be used as a real 'reason' - only for tracing code gen */ diff --git a/include/net/dsa.h b/include/net/dsa.h index 72ae65e7246a..a0a9481c52c2 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -906,6 +906,8 @@ struct dsa_switch_ops { void (*get_rmon_stats)(struct dsa_switch *ds, int port, struct ethtool_rmon_stats *rmon_stats, const struct ethtool_rmon_hist_range **ranges); + void (*get_ts_stats)(struct dsa_switch *ds, int port, + struct ethtool_ts_stats *ts_stats); void (*get_stats64)(struct dsa_switch *ds, int port, struct rtnl_link_stats64 *s); void (*get_pause_stats)(struct dsa_switch *ds, int port, @@ -988,10 +990,9 @@ struct dsa_switch_ops { /* * Port's MAC EEE settings */ + bool (*support_eee)(struct dsa_switch *ds, int port); int (*set_mac_eee)(struct dsa_switch *ds, int port, struct ethtool_keee *e); - int (*get_mac_eee)(struct dsa_switch *ds, int port, - struct ethtool_keee *e); /* EEPROM access */ int (*get_eeprom_len)(struct dsa_switch *ds); @@ -1383,5 +1384,6 @@ static inline bool dsa_user_dev_check(const struct net_device *dev) netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev); void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up); +bool dsa_supports_eee(struct dsa_switch *ds, int port); #endif diff --git a/include/net/dst.h b/include/net/dst.h index 0f303cc60252..78c78cdce0e9 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -307,7 +307,7 @@ static inline bool dst_hold_safe(struct dst_entry *dst) * @skb: buffer * * If dst is not yet refcounted and not destroyed, grab a ref on it. - * Returns true if dst is refcounted. + * Returns: true if dst is refcounted. */ static inline bool skb_dst_force(struct sk_buff *skb) { @@ -440,6 +440,15 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout) dst->expires = expires; } +static inline unsigned int dst_dev_overhead(struct dst_entry *dst, + struct sk_buff *skb) +{ + if (likely(dst)) + return LL_RESERVED_SPACE(dst->dev); + + return skb->mac_len; +} + INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *, struct sk_buff *)); INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *, diff --git a/include/net/genetlink.h b/include/net/genetlink.h index d096cc6352de..a03d56765832 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -354,7 +354,7 @@ __genlmsg_iput(struct sk_buff *skb, const struct genl_info *info, int flags) * such requests) or a struct initialized by genl_info_init_ntf() * when constructing notifications. * - * Returns pointer to new genetlink header. + * Returns: pointer to new genetlink header. */ static inline void * genlmsg_iput(struct sk_buff *skb, const struct genl_info *info) @@ -366,7 +366,7 @@ genlmsg_iput(struct sk_buff *skb, const struct genl_info *info) * genlmsg_nlhdr - Obtain netlink header from user specified header * @user_hdr: user header as returned from genlmsg_put() * - * Returns pointer to netlink header. + * Returns: pointer to netlink header. 
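The dst_dev_overhead() helper added to dst.h earlier in this section returns the device headroom when a dst is attached and falls back to skb->mac_len otherwise. A minimal sketch of the intended call pattern in an encapsulation path; my_push_encap() is an assumption:

#include <linux/skbuff.h>
#include <net/dst.h>

static int my_push_encap(struct sk_buff *skb, unsigned int encap_len)
{
	unsigned int headroom;

	headroom = dst_dev_overhead(skb_dst(skb), skb) + encap_len;

	/* ensure the header can be pushed without a later reallocation */
	return skb_cow_head(skb, headroom);
}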
*/ static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr) { @@ -435,7 +435,7 @@ static inline void genl_dump_check_consistent(struct netlink_callback *cb, * @flags: netlink message flags * @cmd: generic netlink command * - * Returns pointer to user specific header + * Returns: pointer to user specific header */ static inline void *genlmsg_put_reply(struct sk_buff *skb, struct genl_info *info, diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 56d8bc5593d3..1086256549fa 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -19,6 +19,7 @@ #include <linux/netdevice.h> #include <net/flow.h> +#include <net/inet_dscp.h> #include <net/sock.h> #include <net/request_sock.h> #include <net/netns/hash.h> @@ -172,7 +173,7 @@ struct inet_cork { u8 tx_flags; __u8 ttl; __s16 tos; - char priority; + u32 priority; __u16 gso_size; u32 ts_opt_id; u64 transmit_time; @@ -302,6 +303,11 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet) return READ_ONCE(inet->inet_flags) & IP_CMSG_ALL; } +static inline dscp_t inet_sk_dscp(const struct inet_sock *inet) +{ + return inet_dsfield_to_dscp(READ_ONCE(inet->tos)); +} + #define inet_test_bit(nr, sk) \ test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) #define inet_set_bit(nr, sk) \ diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 62c0a7e65d6b..67a313575780 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -74,6 +74,10 @@ struct inet_timewait_sock { tw_tos : 8; u32 tw_txhash; u32 tw_priority; + /** + * @tw_entry_stamp: Time of entry into %TCP_TIME_WAIT state in msec. + */ + u32 tw_entry_stamp; struct timer_list tw_timer; struct inet_bind_bucket *tw_tb; struct inet_bind2_bucket *tw_tb2; diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 74ff688568a0..f475757daafb 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ -96,30 +96,28 @@ static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr) /* can be called with or without local BH being disabled */ struct inet_peer *inet_getpeer(struct inet_peer_base *base, - const struct inetpeer_addr *daddr, - int create); + const struct inetpeer_addr *daddr); static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base, __be32 v4daddr, - int vif, int create) + int vif) { struct inetpeer_addr daddr; daddr.a4.addr = v4daddr; daddr.a4.vif = vif; daddr.family = AF_INET; - return inet_getpeer(base, &daddr, create); + return inet_getpeer(base, &daddr); } static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base, - const struct in6_addr *v6daddr, - int create) + const struct in6_addr *v6daddr) { struct inetpeer_addr daddr; daddr.a6 = *v6daddr; daddr.family = AF_INET6; - return inet_getpeer(base, &daddr, create); + return inet_getpeer(base, &daddr); } static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a, diff --git a/include/net/ip.h b/include/net/ip.h index 0e548c1f2a0e..9f5e33e371fc 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -81,7 +81,6 @@ struct ipcm_cookie { __u8 protocol; __u8 ttl; __s16 tos; - char priority; __u16 gso_size; }; @@ -96,6 +95,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm, ipcm_init(ipcm); ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark); + ipcm->sockc.priority = READ_ONCE(inet->sk.sk_priority); ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags); ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if); ipcm->addr = inet->inet_saddr; diff --git a/include/net/ipv6.h
b/include/net/ipv6.h index 248bfb26e2af..f5c43ad1565e 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -471,7 +471,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk, /* This helper is specialized for BIG TCP needs. * It assumes the hop_jumbo_hdr will immediately follow the IPV6 header. * It assumes headers are already in skb->head. - * Returns 0, or IPPROTO_TCP if a BIG TCP packet is there. + * Returns: 0, or IPPROTO_TCP if a BIG TCP packet is there. */ static inline int ipv6_has_hopopt_jumbo(const struct sk_buff *skb) { diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h index dd9e93c12260..9804fa5d9c67 100644 --- a/include/net/iucv/iucv.h +++ b/include/net/iucv/iucv.h @@ -202,7 +202,7 @@ struct iucv_handler { * * Registers a driver with IUCV. * - * Returns 0 on success, -ENOMEM if the memory allocation for the pathid + * Returns: 0 on success, -ENOMEM if the memory allocation for the pathid * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus. */ int iucv_register(struct iucv_handler *handler, int smp); @@ -224,7 +224,7 @@ void iucv_unregister(struct iucv_handler *handle, int smp); * * Allocate a new path structure for use with iucv_connect. * - * Returns NULL if the memory allocation failed or a pointer to the + * Returns: NULL if the memory allocation failed or a pointer to the * path structure. */ static inline struct iucv_path *iucv_path_alloc(u16 msglim, u8 flags, gfp_t gfp) @@ -260,7 +260,7 @@ static inline void iucv_path_free(struct iucv_path *path) * This function is issued after the user received a connection pending * external interrupt and now wishes to complete the IUCV communication path. * - * Returns the result of the CP IUCV call. + * Returns: the result of the CP IUCV call. */ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, u8 *userdata, void *private); @@ -278,7 +278,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, * successfully, you are not able to use the path until you receive an IUCV * Connection Complete external interrupt. * - * Returns the result of the CP IUCV call. + * Returns: the result of the CP IUCV call. */ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, u8 *userid, u8 *system, u8 *userdata, @@ -292,7 +292,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, * This function temporarily suspends incoming messages on an IUCV path. * You can later reactivate the path by invoking the iucv_resume function. * - * Returns the result from the CP IUCV call. + * Returns: the result from the CP IUCV call. */ int iucv_path_quiesce(struct iucv_path *path, u8 *userdata); @@ -304,7 +304,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 *userdata); * This function resumes incoming messages on an IUCV path that has * been stopped with iucv_path_quiesce. * - * Returns the result from the CP IUCV call. + * Returns: the result from the CP IUCV call. */ int iucv_path_resume(struct iucv_path *path, u8 *userdata); @@ -315,7 +315,7 @@ int iucv_path_resume(struct iucv_path *path, u8 *userdata); * * This function terminates an IUCV path. * - * Returns the result from the CP IUCV call. + * Returns: the result from the CP IUCV call. */ int iucv_path_sever(struct iucv_path *path, u8 *userdata); @@ -327,7 +327,7 @@ int iucv_path_sever(struct iucv_path *path, u8 *userdata); * * Cancels a message you have sent. * - * Returns the result from the CP IUCV call. + * Returns: the result from the CP IUCV call. 
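With the create argument removed from inet_getpeer() and its wrappers in the inetpeer.h hunk above, every lookup now finds or creates the entry. A hedged sketch of the resulting call-site shape; my_peer_lookup() is illustrative and net->ipv4.peers is assumed to be the per-netns v4 peer base:

#include <net/inetpeer.h>

static struct inet_peer *my_peer_lookup(struct net *net, __be32 daddr,
					int vif)
{
	/* no 'create' flag anymore; NULL still means allocation failure */
	return inet_getpeer_v4(net->ipv4.peers, daddr, vif);
}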
*/ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, u32 srccls); @@ -347,7 +347,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, * * Locking: local_bh_enable/local_bh_disable * - * Returns the result from the CP IUCV call. + * Returns: the result from the CP IUCV call. */ int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, u8 flags, void *buffer, size_t size, size_t *residual); @@ -367,7 +367,7 @@ int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, * * Locking: no locking. * - * Returns the result from the CP IUCV call. + * Returns: the result from the CP IUCV call. */ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, u8 flags, void *buffer, size_t size, @@ -382,7 +382,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, * are notified of a message and the time that you complete the message, * the message may be rejected. * - * Returns the result from the CP IUCV call. + * Returns: the result from the CP IUCV call. */ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg); @@ -399,7 +399,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg); * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into * the parameter list. * - * Returns the result from the CP IUCV call. + * Returns: the result from the CP IUCV call. */ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, u8 flags, void *reply, size_t size); @@ -419,7 +419,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, * * Locking: local_bh_enable/local_bh_disable * - * Returns the result from the CP IUCV call. + * Returns: the result from the CP IUCV call. */ int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, u8 flags, u32 srccls, void *buffer, size_t size); @@ -439,7 +439,7 @@ int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, * * Locking: no locking. * - * Returns the result from the CP IUCV call. + * Returns: the result from the CP IUCV call. */ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, u8 flags, u32 srccls, void *buffer, size_t size); @@ -461,7 +461,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, * reply to the message and a buffer is provided into which IUCV moves * the reply to this message. * - * Returns the result from the CP IUCV call. + * Returns: the result from the CP IUCV call. */ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, u8 flags, u32 srccls, void *buffer, size_t size, diff --git a/include/net/mac80211.h b/include/net/mac80211.h index ab8dce1f2c27..c3ed2fcff8b7 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1855,6 +1855,9 @@ struct ieee80211_channel_switch { * operation on this interface and request a channel context without * the AP definition. Use this e.g. because the device is able to * handle OFDMA (downlink and trigger for uplink) on a per-AP basis. + * @IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC: indicates that the AP sta should + * be removed only after setting the vif as unassociated, and not the + * opposite. Only relevant for STA vifs. 
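A driver opts in to the IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC behaviour described above by setting the flag on the vif, typically at interface-add time. A minimal sketch; the callback body is illustrative only:

static int my_add_interface(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif)
{
	if (vif->type == NL80211_IFTYPE_STATION)
		vif->driver_flags |= IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC;

	return 0;
}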
*/ enum ieee80211_vif_flags { IEEE80211_VIF_BEACON_FILTER = BIT(0), @@ -1863,6 +1866,7 @@ enum ieee80211_vif_flags { IEEE80211_VIF_GET_NOA_UPDATE = BIT(3), IEEE80211_VIF_EML_ACTIVE = BIT(4), IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW = BIT(5), + IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC = BIT(6), }; @@ -2216,7 +2220,7 @@ enum ieee80211_key_flags { * @tx_pn: PN used for TX keys, may be used by the driver as well if it * needs to do software PN assignment by itself (e.g. due to TSO) * @flags: key flags, see &enum ieee80211_key_flags. - * @keyidx: the key index (0-3) + * @keyidx: the key index (0-7) * @keylen: key material length * @key: key material. For ALG_TKIP the key is encoded as a 256-bit (32 byte) * data block: @@ -2225,7 +2229,7 @@ enum ieee80211_key_flags { * - Temporal Authenticator Rx MIC Key (64 bits) * @icv_len: The ICV length for this key type * @iv_len: The IV length for this key type - * @link_id: the link ID for MLO, or -1 for non-MLO or pairwise keys + * @link_id: the link ID, 0 for non-MLO, or -1 for pairwise keys */ struct ieee80211_key_conf { atomic64_t tx_pn; @@ -2336,6 +2340,8 @@ enum ieee80211_sta_rx_bandwidth { IEEE80211_STA_RX_BW_320, }; +#define IEEE80211_STA_RX_BW_MAX IEEE80211_STA_RX_BW_320 + /** * struct ieee80211_sta_rates - station rate selection table * @@ -4762,7 +4768,7 @@ struct ieee80211_ops { u32 (*get_expected_throughput)(struct ieee80211_hw *hw, struct ieee80211_sta *sta); int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - int *dbm); + unsigned int link_id, int *dbm); int (*tdls_channel_switch)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -7734,6 +7740,50 @@ ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width) } } +/** + * ieee80211_prepare_rx_omi_bw - prepare for sending BW RX OMI + * @link_sta: the link STA the OMI is going to be sent to + * @bw: the bandwidth requested + * + * When the driver decides to do RX OMI to change bandwidth with a STA + * it calls this function to prepare, then sends the OMI, and finally + * calls ieee80211_finalize_rx_omi_bw(). + * + * Note that the (link) STA rate control is updated accordingly as well, + * but the chanctx might not be updated if there are other users. + * If the intention is to reduce the listen bandwidth, the driver must + * ensure there are no TDLS stations nor other uses of the chanctx. + * + * Also note that in order to sequence correctly, narrowing bandwidth + * will only happen in ieee80211_finalize_rx_omi_bw(), whereas widening + * again (e.g. going back to normal) will happen here. + * + * Note that we treat this symmetrically, so if the driver calls this + * and tells the peer to only send with a lower bandwidth, we assume + * that the driver also wants to only send at that lower bandwidth, to + * allow narrowing of the chanctx request for this station/interface. + * + * Finally, the driver must ensure that if the function returned %true, + * ieee80211_finalize_rx_omi_bw() is also called, even for example in + * case of HW restart. + * + * Context: Must be called with wiphy mutex held, and will call back + * into the driver, so ensure no driver locks are held. + * + * Return: %true if changes are going to be made, %false otherwise + */ +bool ieee80211_prepare_rx_omi_bw(struct ieee80211_link_sta *link_sta, + enum ieee80211_sta_rx_bandwidth bw); + +/** + * ieee80211_finalize_rx_omi_bw - finalize BW RX OMI update + * @link_sta: the link STA the OMI was sent to + * + * See ieee80211_prepare_rx_omi_bw(). Context is the same here + * as well.
+ */ +void ieee80211_finalize_rx_omi_bw(struct ieee80211_link_sta *link_sta); + /* for older drivers - let's not document these ... */ int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx); diff --git a/include/net/macsec.h b/include/net/macsec.h index de216cbc6b05..bc7de5b53e54 100644 --- a/include/net/macsec.h +++ b/include/net/macsec.h @@ -38,8 +38,8 @@ struct metadata_dst; typedef union salt { struct { - u32 ssci; - u64 pn; + ssci_t ssci; + __be64 pn; } __packed; u8 bytes[MACSEC_SALT_LEN]; } __packed salt_t; diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 5a2a0df8ad91..0f5eb9db0c62 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -210,6 +210,8 @@ void net_ns_barrier(void); struct ns_common *get_net_ns(struct ns_common *ns); struct net *get_net_ns_by_fd(int fd); +extern struct task_struct *cleanup_net_task; + #else /* CONFIG_NET_NS */ #include <linux/sched.h> #include <linux/nsproxy.h> diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h index 5ca019d294ca..b02bb9f109d5 100644 --- a/include/net/netdev_queues.h +++ b/include/net/netdev_queues.h @@ -4,6 +4,16 @@ #include <linux/netdevice.h> +/** + * struct netdev_config - queue-related configuration for a netdev + * @hds_thresh: HDS Threshold value. + * @hds_config: HDS value from userspace. + */ +struct netdev_config { + u32 hds_thresh; + u8 hds_config; +}; + /* See the netdev.yaml spec for definition of each statistic */ struct netdev_queue_stats_rx { u64 bytes; diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index cba3ccf03fcc..3f02a45773e8 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -204,8 +204,7 @@ bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, struct nf_conntrack_tuple *tuple); void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo, - const struct sk_buff *skb, - u32 extra_jiffies, bool do_acct); + u32 extra_jiffies, unsigned int bytes); /* Refresh conntrack for this many jiffies and do accounting */ static inline void nf_ct_refresh_acct(struct nf_conn *ct, @@ -213,15 +212,14 @@ static inline void nf_ct_refresh_acct(struct nf_conn *ct, const struct sk_buff *skb, u32 extra_jiffies) { - __nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, true); + __nf_ct_refresh_acct(ct, ctinfo, extra_jiffies, skb->len); } /* Refresh conntrack for this many jiffies */ static inline void nf_ct_refresh(struct nf_conn *ct, - const struct sk_buff *skb, u32 extra_jiffies) { - __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, false); + __nf_ct_refresh_acct(ct, 0, extra_jiffies, 0); } /* kill conntrack and do accounting */ @@ -314,16 +312,6 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct) #define NF_CT_DAY (86400 * HZ) -/* Set an arbitrary timeout large enough not to ever expire, this save - * us a check for the IPS_OFFLOAD_BIT from the packet path via - * nf_ct_is_expired(). 
- */ -static inline void nf_ct_offload_timeout(struct nf_conn *ct) -{ - if (nf_ct_expires(ct) < NF_CT_DAY / 2) - WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY); -} - struct kernel_param; int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp); diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index 0c1dac318e02..8dcf7c371ee9 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -12,6 +12,7 @@ #include <linux/netfilter/nf_conntrack_common.h> #include <linux/netfilter/nf_conntrack_tuple_common.h> #include <net/netfilter/nf_conntrack_extend.h> +#include <asm/local64.h> enum nf_ct_ecache_state { NFCT_ECACHE_DESTROY_FAIL, /* tried but failed to send destroy event */ @@ -20,6 +21,9 @@ enum nf_ct_ecache_state { struct nf_conntrack_ecache { unsigned long cache; /* bitops want long */ +#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP + local64_t timestamp; /* event timestamp, in nanoseconds */ +#endif u16 ctmask; /* bitmask of ct events to be delivered */ u16 expmask; /* bitmask of expect events to be delivered */ u32 missed; /* missed events */ @@ -108,6 +112,14 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) if (e == NULL) return; +#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP + /* renew only if this is the first cached event, so that the + * timestamp reflects the first, not the last, generated event. + */ + if (local64_read(&e->timestamp) && READ_ONCE(e->cache) == 0) + local64_set(&e->timestamp, ktime_get_real_ns()); +#endif + set_bit(event, &e->cache); #endif } diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index b63d53bb9dd6..d711642e78b5 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -163,6 +163,7 @@ struct flow_offload_tuple_rhash { enum nf_flow_flags { NF_FLOW_SNAT, NF_FLOW_DNAT, + NF_FLOW_CLOSING, NF_FLOW_TEARDOWN, NF_FLOW_HW, NF_FLOW_HW_DYING, diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 0027beca5cd5..60d5dcdb289c 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -442,6 +442,9 @@ struct nft_set_ext; * @remove: remove element from set * @walk: iterate over all set elements * @get: get set elements + * @ksize: kernel set size + * @usize: userspace set size + * @adjust_maxsize: delta to adjust maximum set size * @commit: commit set elements * @abort: abort set elements * @privsize: function to return size of set private data @@ -495,6 +498,9 @@ struct nft_set_ops { const struct nft_set *set, const struct nft_set_elem *elem, unsigned int flags); + u32 (*ksize)(u32 size); + u32 (*usize)(u32 size); + u32 (*adjust_maxsize)(const struct nft_set *set); void (*commit)(struct nft_set *set); void (*abort)(const struct nft_set *set); u64 (*privsize)(const struct nlattr * const nla[], @@ -1195,6 +1201,8 @@ struct nft_hook { struct list_head list; struct nf_hook_ops ops; struct rcu_head rcu; + char ifname[IFNAMSIZ]; + u8 ifnamelen; }; /** @@ -1230,8 +1238,6 @@ static inline bool nft_is_base_chain(const struct nft_chain *chain) return chain->flags & NFT_CHAIN_BASE; } -int __nft_release_basechain(struct nft_ctx *ctx); - unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv); static inline bool nft_use_inc(u32 *use) diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h index 5adf6fda11e8..06985530517b 100644 --- a/include/net/netfilter/nf_tproxy.h 
+++ b/include/net/netfilter/nf_tproxy.h @@ -49,7 +49,7 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr); * * nf_tproxy_handle_time_wait4() consumes the socket reference passed in. * - * Returns the listener socket if there's one, the TIME_WAIT socket if + * Returns: the listener socket if there's one, the TIME_WAIT socket if * no such listener is found, or NULL if the TCP header is incomplete. */ struct sock * @@ -108,7 +108,7 @@ nf_tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr, * * nf_tproxy_handle_time_wait6() consumes the socket reference passed in. * - * Returns the listener socket if there's one, the TIME_WAIT socket if + * Returns: the listener socket if there's one, the TIME_WAIT socket if * no such listener is found, or NULL if the TCP header is incomplete. */ struct sock * diff --git a/include/net/netlink.h b/include/net/netlink.h index 39eaa6be6ca8..e015ffbed819 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -649,7 +649,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining) * @nlh: netlink message header * @remaining: number of bytes remaining in message stream * - * Returns the next netlink message in the message stream and + * Returns: the next netlink message in the message stream and * decrements remaining by the size of the current message. */ static inline struct nlmsghdr * @@ -676,7 +676,7 @@ nlmsg_next(const struct nlmsghdr *nlh, int *remaining) * exceeding maxtype will be rejected, policy must be specified, attributes * will be validated in the strictest way possible. * - * Returns 0 on success or a negative error code. + * Returns: 0 on success or a negative error code. */ static inline int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, int len, @@ -701,7 +701,7 @@ static inline int nla_parse(struct nlattr **tb, int maxtype, * exceeding maxtype will be ignored and attributes from the policy are not * always strictly validated (only for new attributes). * - * Returns 0 on success or a negative error code. + * Returns: 0 on success or a negative error code. */ static inline int nla_parse_deprecated(struct nlattr **tb, int maxtype, const struct nlattr *head, int len, @@ -726,7 +726,7 @@ static inline int nla_parse_deprecated(struct nlattr **tb, int maxtype, * exceeding maxtype will be rejected as well as trailing data, but the * policy is not completely strictly validated (only for new attributes). * - * Returns 0 on success or a negative error code. + * Returns: 0 on success or a negative error code. */ static inline int nla_parse_deprecated_strict(struct nlattr **tb, int maxtype, const struct nlattr *head, @@ -833,7 +833,7 @@ nlmsg_parse_deprecated_strict(const struct nlmsghdr *nlh, int hdrlen, * @hdrlen: length of family specific header * @attrtype: type of attribute to look for * - * Returns the first attribute which matches the specified type. + * Returns: the first attribute which matches the specified type. */ static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh, int hdrlen, int attrtype) @@ -854,7 +854,7 @@ static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh, * specified policy. Validation is done in liberal mode. * See documentation of struct nla_policy for more details. * - * Returns 0 on success or a negative error code. + * Returns: 0 on success or a negative error code. 
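The strict parse variants documented above reject unknown attribute types and trailing data. A self-contained sketch; the MY_ATTR_* enum and policy are assumptions for illustration, not from this diff:

#include <net/netlink.h>

enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_VALUE,
	__MY_ATTR_MAX,
};
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_VALUE] = { .type = NLA_U32 },
};

static int my_parse_nest(const struct nlattr *nest, u32 *out,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MY_ATTR_MAX + 1];
	int err;

	/* strict parsing: attributes above MY_ATTR_MAX are rejected */
	err = nla_parse_nested(tb, MY_ATTR_MAX, nest, my_policy, extack);
	if (err)
		return err;

	if (!tb[MY_ATTR_VALUE])
		return -EINVAL;

	*out = nla_get_u32(tb[MY_ATTR_VALUE]);
	return 0;
}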
*/ static inline int nla_validate_deprecated(const struct nlattr *head, int len, int maxtype, @@ -877,7 +877,7 @@ static inline int nla_validate_deprecated(const struct nlattr *head, int len, * specified policy. Validation is done in strict mode. * See documentation of struct nla_policy for more details. * - * Returns 0 on success or a negative error code. + * Returns: 0 on success or a negative error code. */ static inline int nla_validate(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, @@ -914,7 +914,7 @@ static inline int nlmsg_validate_deprecated(const struct nlmsghdr *nlh, * nlmsg_report - need to report back to application? * @nlh: netlink message header * - * Returns 1 if a report back to the application is requested. + * Returns: 1 if a report back to the application is requested. */ static inline int nlmsg_report(const struct nlmsghdr *nlh) { @@ -925,7 +925,7 @@ static inline int nlmsg_report(const struct nlmsghdr *nlh) * nlmsg_seq - return the seq number of netlink message * @nlh: netlink message header * - * Returns 0 if netlink message is NULL + * Returns: 0 if netlink message is NULL */ static inline u32 nlmsg_seq(const struct nlmsghdr *nlh) { @@ -952,7 +952,7 @@ static inline u32 nlmsg_seq(const struct nlmsghdr *nlh) * @payload: length of message payload * @flags: message flags * - * Returns NULL if the tailroom of the skb is insufficient to store + * Returns: NULL if the tailroom of the skb is insufficient to store * the message header and payload. */ static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, @@ -971,7 +971,7 @@ static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 se * * Append data to an existing nlmsg, used when constructing a message * with multiple fixed-format headers (which is rare). - * Returns NULL if the tailroom of the skb is insufficient to store + * Returns: NULL if the tailroom of the skb is insufficient to store * the extra payload. */ static inline void *nlmsg_append(struct sk_buff *skb, u32 size) @@ -993,7 +993,7 @@ static inline void *nlmsg_append(struct sk_buff *skb, u32 size) * @payload: length of message payload * @flags: message flags * - * Returns NULL if the tailroom of the skb is insufficient to store + * Returns: NULL if the tailroom of the skb is insufficient to store * the message header and payload. */ static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb, @@ -1050,7 +1050,7 @@ static inline void nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh) * nlmsg_get_pos - return current position in netlink message * @skb: socket buffer the message is stored in * - * Returns a pointer to the current tail of the message. + * Returns: a pointer to the current tail of the message. */ static inline void *nlmsg_get_pos(struct sk_buff *skb) { @@ -1276,7 +1276,7 @@ static inline int nla_ok(const struct nlattr *nla, int remaining) * @nla: netlink attribute * @remaining: number of bytes remaining in attribute stream * - * Returns the next netlink attribute in the attribute stream and + * Returns: the next netlink attribute in the attribute stream and * decrements remaining by the size of the current attribute. */ static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining) @@ -1292,7 +1292,7 @@ static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining) * @nla: attribute containing the nested attributes * @attrtype: type of attribute to look for * - * Returns the first attribute which matches the specified type. 
+ * Returns: the first attribute which matches the specified type. */ static inline struct nlattr * nla_find_nested(const struct nlattr *nla, int attrtype) @@ -2091,7 +2091,7 @@ static inline int nla_get_flag(const struct nlattr *nla) * nla_get_msecs - return payload of msecs attribute * @nla: msecs netlink attribute * - * Returns the number of milliseconds in jiffies. + * Returns: the number of milliseconds in jiffies. */ static inline unsigned long nla_get_msecs(const struct nlattr *nla) { @@ -2183,7 +2183,7 @@ static inline void *nla_memdup_noprof(const struct nlattr *src, gfp_t gfp) * marked their nest attributes with NLA_F_NESTED flag. New APIs should use * nla_nest_start() which sets the flag. * - * Returns the container attribute or NULL on error + * Returns: the container attribute or NULL on error */ static inline struct nlattr *nla_nest_start_noflag(struct sk_buff *skb, int attrtype) @@ -2204,7 +2204,7 @@ static inline struct nlattr *nla_nest_start_noflag(struct sk_buff *skb, * Unlike nla_nest_start_noflag(), mark the nest attribute with NLA_F_NESTED * flag. This is the preferred function to use in new code. * - * Returns the container attribute or NULL on error + * Returns: the container attribute or NULL on error */ static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype) { @@ -2219,7 +2219,7 @@ static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype) * Corrects the container attribute header to include the all * appended attributes. * - * Returns the total data length of the skb. + * Returns: the total data length of the skb. */ static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start) { @@ -2252,7 +2252,7 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start) * specified policy. Attributes with a type exceeding maxtype will be * ignored. See documentation of struct nla_policy for more details. * - * Returns 0 on success or a negative error code. + * Returns: 0 on success or a negative error code. */ static inline int __nla_validate_nested(const struct nlattr *start, int maxtype, const struct nla_policy *policy, @@ -2285,7 +2285,7 @@ nla_validate_nested_deprecated(const struct nlattr *start, int maxtype, * nla_need_padding_for_64bit - test 64-bit alignment of the next attribute * @skb: socket buffer the message is stored in * - * Return true if padding is needed to align the next attribute (nla_data()) to + * Return: true if padding is needed to align the next attribute (nla_data()) to * a 64-bit aligned area. */ static inline bool nla_need_padding_for_64bit(struct sk_buff *skb) @@ -2312,7 +2312,7 @@ static inline bool nla_need_padding_for_64bit(struct sk_buff *skb) * This will only be done in architectures which do not have * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS defined. * - * Returns zero on success or a negative error code. + * Returns: zero on success or a negative error code. */ static inline int nla_align_64bit(struct sk_buff *skb, int padattr) { diff --git a/include/net/netmem.h b/include/net/netmem.h index 8a6e20be4b9d..1b58faa4f20f 100644 --- a/include/net/netmem.h +++ b/include/net/netmem.h @@ -72,6 +72,22 @@ static inline bool netmem_is_net_iov(const netmem_ref netmem) return (__force unsigned long)netmem & NET_IOV; } +/** + * __netmem_to_page - unsafely get pointer to the &page backing @netmem + * @netmem: netmem reference to convert + * + * Unsafe version of netmem_to_page(). When @netmem is always page-backed, + * e.g. 
when it's a header buffer, performs faster and generates smaller + * object code (no check for the LSB, no WARN). When @netmem points to IOV, + * provokes undefined behaviour. + * + * Return: pointer to the &page (garbage if @netmem is not page-backed). + */ +static inline struct page *__netmem_to_page(netmem_ref netmem) +{ + return (__force struct page *)netmem; +} + /* This conversion fails (returns NULL) if the netmem_ref is not struct page * backed. */ @@ -80,7 +96,7 @@ static inline struct page *netmem_to_page(netmem_ref netmem) if (WARN_ON_ONCE(netmem_is_net_iov(netmem))) return NULL; - return (__force struct page *)netmem; + return __netmem_to_page(netmem); } static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem) @@ -103,6 +119,17 @@ static inline netmem_ref page_to_netmem(struct page *page) return (__force netmem_ref)page; } +/** + * virt_to_netmem - convert virtual memory pointer to a netmem reference + * @data: host memory pointer to convert + * + * Return: netmem reference to the &page backing this virtual address. + */ +static inline netmem_ref virt_to_netmem(const void *data) +{ + return page_to_netmem(virt_to_page(data)); +} + static inline int netmem_ref_count(netmem_ref netmem) { /* The non-pp refcount of net_iov is always 1. On net_iov, we only @@ -127,6 +154,22 @@ static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem) return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV); } +/** + * __netmem_get_pp - unsafely get pointer to the &page_pool backing @netmem + * @netmem: netmem reference to get the pointer from + * + * Unsafe version of netmem_get_pp(). When @netmem is always page-backed, + * e.g. when it's a header buffer, performs faster and generates smaller + * object code (avoids clearing the LSB). When @netmem points to IOV, + * provokes invalid memory access. + * + * Return: pointer to the &page_pool (garbage if @netmem is not page-backed). + */ +static inline struct page_pool *__netmem_get_pp(netmem_ref netmem) +{ + return __netmem_to_page(netmem)->pp; +} + static inline struct page_pool *netmem_get_pp(netmem_ref netmem) { return __netmem_clear_lsb(netmem)->pp; @@ -158,12 +201,43 @@ static inline netmem_ref netmem_compound_head(netmem_ref netmem) return page_to_netmem(compound_head(netmem_to_page(netmem))); } +/** + * __netmem_address - unsafely get pointer to the memory backing @netmem + * @netmem: netmem reference to get the pointer for + * + * Unsafe version of netmem_address(). When @netmem is always page-backed, + * e.g. when it's a header buffer, performs faster and generates smaller + * object code (no check for the LSB). When @netmem points to IOV, provokes + * undefined behaviour. + * + * Return: pointer to the memory (garbage if @netmem is not page-backed). + */ +static inline void *__netmem_address(netmem_ref netmem) +{ + return page_address(__netmem_to_page(netmem)); +} + static inline void *netmem_address(netmem_ref netmem) { if (netmem_is_net_iov(netmem)) return NULL; - return page_address(netmem_to_page(netmem)); + return __netmem_address(netmem); +} + +/** + * netmem_is_pfmemalloc - check if @netmem was allocated under memory pressure + * @netmem: netmem reference to check + * + * Return: true if @netmem is page-backed and the page was allocated under + * memory pressure, false otherwise. 
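The unsafe __netmem_*() accessors above skip the net_iov check and are only valid when the caller can prove the netmem is page-backed, e.g. a header buffer ring. A hedged example; my_hdr_va() is illustrative:

#include <net/netmem.h>

/* caller guarantees hdr_netmem came from a pool that never hands out
 * net_iov-backed memory, so the unchecked accessor is safe and faster
 */
static void *my_hdr_va(netmem_ref hdr_netmem, u32 offset)
{
	return __netmem_address(hdr_netmem) + offset;
}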
+ */ +static inline bool netmem_is_pfmemalloc(netmem_ref netmem) +{ + if (netmem_is_net_iov(netmem)) + return false; + + return page_is_pfmemalloc(netmem_to_page(netmem)); } static inline unsigned long netmem_get_dma_addr(netmem_ref netmem) diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 3c014170e001..46452da35206 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -175,6 +175,7 @@ struct netns_ipv4 { u8 sysctl_tcp_retries2; u8 sysctl_tcp_orphan_retries; u8 sysctl_tcp_tw_reuse; + unsigned int sysctl_tcp_tw_reuse_delay; int sysctl_tcp_fin_timeout; u8 sysctl_tcp_sack; u8 sysctl_tcp_window_scaling; diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h index 60a5347922be..582a3d00cbe2 100644 --- a/include/net/page_pool/helpers.h +++ b/include/net/page_pool/helpers.h @@ -104,8 +104,7 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) * * Get a page fragment from the page allocator or page_pool caches. * - * Return: - * Return allocated page fragment, otherwise return NULL. + * Return: allocated page fragment, otherwise return NULL. */ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, unsigned int *offset, @@ -116,22 +115,22 @@ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, return page_pool_alloc_frag(pool, offset, size, gfp); } -static inline struct page *page_pool_alloc(struct page_pool *pool, - unsigned int *offset, - unsigned int *size, gfp_t gfp) +static inline netmem_ref page_pool_alloc_netmem(struct page_pool *pool, + unsigned int *offset, + unsigned int *size, gfp_t gfp) { unsigned int max_size = PAGE_SIZE << pool->p.order; - struct page *page; + netmem_ref netmem; if ((*size << 1) > max_size) { *size = max_size; *offset = 0; - return page_pool_alloc_pages(pool, gfp); + return page_pool_alloc_netmems(pool, gfp); } - page = page_pool_alloc_frag(pool, offset, *size, gfp); - if (unlikely(!page)) - return NULL; + netmem = page_pool_alloc_frag_netmem(pool, offset, *size, gfp); + if (unlikely(!netmem)) + return 0; /* There is very likely not enough space for another fragment, so append * the remaining size to the current fragment to avoid truesize @@ -142,7 +141,23 @@ static inline struct page *page_pool_alloc(struct page_pool *pool, pool->frag_offset = max_size; } - return page; + return netmem; +} + +static inline netmem_ref page_pool_dev_alloc_netmem(struct page_pool *pool, + unsigned int *offset, + unsigned int *size) +{ + gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN; + + return page_pool_alloc_netmem(pool, offset, size, gfp); +} + +static inline struct page *page_pool_alloc(struct page_pool *pool, + unsigned int *offset, + unsigned int *size, gfp_t gfp) +{ + return netmem_to_page(page_pool_alloc_netmem(pool, offset, size, gfp)); } /** @@ -155,8 +170,7 @@ static inline struct page *page_pool_alloc(struct page_pool *pool, * depending on the requested size in order to allocate memory with least memory * utilization and performance penalty. * - * Return: - * Return allocated page or page fragment, otherwise return NULL. + * Return: allocated page or page fragment, otherwise return NULL. */ static inline struct page *page_pool_dev_alloc(struct page_pool *pool, unsigned int *offset, @@ -190,8 +204,7 @@ static inline void *page_pool_alloc_va(struct page_pool *pool, * This is just a thin wrapper around the page_pool_alloc() API, and * it returns va of the allocated page or page fragment. 
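page_pool_dev_alloc_netmem() above is the netmem counterpart of page_pool_dev_alloc(). A sketch of an RX ring refill using it; the slot layout and frag size are illustrative assumptions:

#include <net/page_pool/helpers.h>

struct my_rx_slot {			/* hypothetical ring entry */
	netmem_ref netmem;
	unsigned int offset;
	unsigned int len;
};

static int my_rx_refill(struct page_pool *pool, struct my_rx_slot *slot)
{
	unsigned int offset, size = 2048;	/* requested frag size */
	netmem_ref netmem;

	netmem = page_pool_dev_alloc_netmem(pool, &offset, &size);
	if (!netmem)
		return -ENOMEM;

	slot->netmem = netmem;
	slot->offset = offset;
	slot->len = size;	/* may have been grown by the allocator */
	return 0;
}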
* - * Return: - * Return the va for the allocated page or page fragment, otherwise return NULL. + * Return: the va for the allocated page or page fragment, otherwise return NULL. */ static inline void *page_pool_dev_alloc_va(struct page_pool *pool, unsigned int *size) @@ -302,7 +315,7 @@ static inline void page_pool_ref_page(struct page *page) page_pool_ref_netmem(page_to_netmem(page)); } -static inline bool page_pool_is_last_ref(netmem_ref netmem) +static inline bool page_pool_unref_and_test(netmem_ref netmem) { /* If page_pool_unref_page() returns 0, we were the last user */ return page_pool_unref_netmem(netmem, 1) == 0; @@ -317,7 +330,7 @@ static inline void page_pool_put_netmem(struct page_pool *pool, * allow registering MEM_TYPE_PAGE_POOL, but shield linker. */ #ifdef CONFIG_PAGE_POOL - if (!page_pool_is_last_ref(netmem)) + if (!page_pool_unref_and_test(netmem)) return; page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct); @@ -418,7 +431,21 @@ static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem) */ static inline dma_addr_t page_pool_get_dma_addr(const struct page *page) { - return page_pool_get_dma_addr_netmem(page_to_netmem((struct page *)page)); + dma_addr_t ret = page->dma_addr; + + if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) + ret <<= PAGE_SHIFT; + + return ret; +} + +static inline void __page_pool_dma_sync_for_cpu(const struct page_pool *pool, + const dma_addr_t dma_addr, + u32 offset, u32 dma_sync_size) +{ + dma_sync_single_range_for_cpu(pool->p.dev, dma_addr, + offset + pool->p.offset, dma_sync_size, + page_pool_get_dma_dir(pool)); } /** @@ -437,10 +464,21 @@ static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool, const struct page *page, u32 offset, u32 dma_sync_size) { - dma_sync_single_range_for_cpu(pool->p.dev, - page_pool_get_dma_addr(page), - offset + pool->p.offset, dma_sync_size, - page_pool_get_dma_dir(pool)); + __page_pool_dma_sync_for_cpu(pool, page_pool_get_dma_addr(page), offset, + dma_sync_size); +} + +static inline void +page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool, + const netmem_ref netmem, u32 offset, + u32 dma_sync_size) +{ + if (!pool->dma_sync_for_cpu) + return; + + __page_pool_dma_sync_for_cpu(pool, + page_pool_get_dma_addr_netmem(netmem), + offset, dma_sync_size); } static inline bool page_pool_put(struct page_pool *pool) diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h index c022c410abe3..ed4cd114180a 100644 --- a/include/net/page_pool/types.h +++ b/include/net/page_pool/types.h @@ -164,7 +164,8 @@ struct page_pool { bool has_init_callback:1; /* slow::init_callback is set */ bool dma_map:1; /* Perform DMA mapping */ - bool dma_sync:1; /* Perform DMA sync */ + bool dma_sync:1; /* Perform DMA sync for device */ + bool dma_sync_for_cpu:1; /* Perform DMA sync for cpu */ #ifdef CONFIG_PAGE_POOL_STATS bool system:1; /* This is a global percpu pool */ #endif @@ -242,7 +243,7 @@ struct page_pool { }; struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp); -netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp); +netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp); struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset, unsigned int size, gfp_t gfp); netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool, @@ -259,8 +260,7 @@ void page_pool_disable_direct_recycling(struct page_pool *pool); void page_pool_destroy(struct page_pool *pool); void page_pool_use_xdp_mem(struct page_pool *pool, 
void (*disconnect)(void *), const struct xdp_mem_info *mem); -void page_pool_put_page_bulk(struct page_pool *pool, void **data, - int count); +void page_pool_put_netmem_bulk(netmem_ref *data, u32 count); #else static inline void page_pool_destroy(struct page_pool *pool) { @@ -272,8 +272,7 @@ static inline void page_pool_use_xdp_mem(struct page_pool *pool, { } -static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data, - int count) +static inline void page_pool_put_netmem_bulk(netmem_ref *data, u32 count) { } #endif diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index cf199af85c52..c64fd896b1f9 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -75,11 +75,11 @@ static inline bool tcf_block_non_null_shared(struct tcf_block *block) } #ifdef CONFIG_NET_CLS_ACT -DECLARE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key); +DECLARE_STATIC_KEY_FALSE(tcf_sw_enabled_key); static inline bool tcf_block_bypass_sw(struct tcf_block *block) { - return block && block->bypass_wanted; + return block && !atomic_read(&block->useswcnt); } #endif @@ -319,7 +319,7 @@ tcf_exts_hw_stats_update(const struct tcf_exts *exts, * tcf_exts_has_actions - check if at least one action is present * @exts: tc filter extensions handle * - * Returns true if at least one action is present. + * Returns: true if at least one action is present. */ static inline bool tcf_exts_has_actions(struct tcf_exts *exts) { @@ -501,7 +501,7 @@ int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *, * through all ematches respecting their logic relations returning * as soon as the result is obvious. * - * Returns 1 if the ematch tree as-one matches, no ematches are configured + * Returns: 1 if the ematch tree as-one matches, no ematches are configured * or ematch is not enabled in the kernel, otherwise 0 is returned. 
*/ static inline int tcf_em_tree_match(struct sk_buff *skb, @@ -760,6 +760,15 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common, cls_common->extack = extack; } +static inline void tcf_proto_update_usesw(struct tcf_proto *tp, u32 flags) +{ + if (tp->usesw) + return; + if (tc_skip_sw(flags) && tc_in_hw(flags)) + return; + tp->usesw = true; +} + #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb) { diff --git a/include/net/route.h b/include/net/route.h index 84cb1e04f5cd..f86775be3e29 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -28,6 +28,7 @@ #include <net/arp.h> #include <net/ndisc.h> #include <net/inet_dscp.h> +#include <net/sock.h> #include <linux/in_route.h> #include <linux/rtnetlink.h> #include <linux/rcupdate.h> @@ -129,6 +130,33 @@ struct in_device; int ip_rt_init(void); void rt_cache_flush(struct net *net); void rt_flush_dev(struct net_device *dev); + +static inline void inet_sk_init_flowi4(const struct inet_sock *inet, + struct flowi4 *fl4) +{ + const struct ip_options_rcu *ip4_opt; + const struct sock *sk; + __be32 daddr; + + rcu_read_lock(); + ip4_opt = rcu_dereference(inet->inet_opt); + + /* Source routing option overrides the socket destination address */ + if (ip4_opt && ip4_opt->opt.srr) + daddr = ip4_opt->opt.faddr; + else + daddr = inet->inet_daddr; + rcu_read_unlock(); + + sk = &inet->sk; + flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark), + ip_sock_rt_tos(sk), ip_sock_rt_scope(sk), + sk->sk_protocol, inet_sk_flowi_flags(sk), daddr, + inet->inet_saddr, inet->inet_dport, + inet->inet_sport, sk->sk_uid); + security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); +} + struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *flp, const struct sk_buff *skb); struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *flp, @@ -185,20 +213,6 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi return ip_route_output_flow(net, fl4, sk); } -static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 *fl4, - __be32 daddr, __be32 saddr, - __be32 gre_key, __u8 tos, int oif) -{ - memset(fl4, 0, sizeof(*fl4)); - fl4->flowi4_oif = oif; - fl4->daddr = daddr; - fl4->saddr = saddr; - fl4->flowi4_tos = tos; - fl4->flowi4_proto = IPPROTO_GRE; - fl4->fl4_gre_key = gre_key; - return ip_route_output_key(net, fl4); -} - enum skb_drop_reason ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, dscp_t dscp, struct net_device *dev, diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 5d74fa7e694c..d635c5b47eba 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -425,6 +425,7 @@ struct tcf_proto { spinlock_t lock; bool deleting; bool counted; + bool usesw; refcount_t refcnt; struct rcu_head rcu; struct hlist_node destroy_ht_node; @@ -474,9 +475,7 @@ struct tcf_block { struct flow_block flow_block; struct list_head owner_list; bool keep_dst; - bool bypass_wanted; - atomic_t filtercnt; /* Number of filters */ - atomic_t skipswcnt; /* Number of skip_sw filters */ + atomic_t useswcnt; atomic_t offloadcnt; /* Number of offloaded filters */ unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */ unsigned int lockeddevcnt; /* Number of devs that require rtnl lock.
*/ @@ -1245,6 +1244,14 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch, return NET_XMIT_DROP; } +static inline int qdisc_drop_reason(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free, + enum skb_drop_reason reason) +{ + tcf_set_drop_reason(skb, reason); + return qdisc_drop(skb, sch, to_free); +} + static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { diff --git a/include/net/scm.h b/include/net/scm.h index 0d35c7c77a74..22bb49589fde 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -105,16 +105,16 @@ static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, #ifdef CONFIG_SECURITY_NETWORK static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) { - char *secdata; - u32 seclen; + struct lsm_context ctx; int err; if (test_bit(SOCK_PASSSEC, &sock->flags)) { - err = security_secid_to_secctx(scm->secid, &secdata, &seclen); + err = security_secid_to_secctx(scm->secid, &ctx); - if (!err) { - put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, seclen, secdata); - security_release_secctx(secdata, seclen); + if (err >= 0) { + put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, ctx.len, + ctx.context); + security_release_secctx(&ctx); } } } diff --git a/include/net/sock.h b/include/net/sock.h index 691ca7695d1d..8036b3b79cd8 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -953,6 +953,7 @@ enum sock_flags { SOCK_XDP, /* XDP is attached */ SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */ SOCK_RCVMARK, /* Receive SO_MARK ancillary data with packet */ + SOCK_RCVPRIORITY, /* Receive SO_PRIORITY ancillary data with packet */ }; #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) @@ -1820,13 +1821,15 @@ struct sockcm_cookie { u32 mark; u32 tsflags; u32 ts_opt_id; + u32 priority; }; static inline void sockcm_init(struct sockcm_cookie *sockc, const struct sock *sk) { *sockc = (struct sockcm_cookie) { - .tsflags = READ_ONCE(sk->sk_tsflags) + .tsflags = READ_ONCE(sk->sk_tsflags), + .priority = READ_ONCE(sk->sk_priority), }; } @@ -2661,7 +2664,8 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk, { #define FLAGS_RECV_CMSGS ((1UL << SOCK_RXQ_OVFL) | \ (1UL << SOCK_RCVTSTAMP) | \ - (1UL << SOCK_RCVMARK)) + (1UL << SOCK_RCVMARK) |\ + (1UL << SOCK_RCVPRIORITY)) #define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \ SOF_TIMESTAMPING_RAW_HARDWARE) diff --git a/include/net/tcp.h b/include/net/tcp.h index e9b37b76e894..5b2b04835688 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1817,7 +1817,7 @@ int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp, * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash() * @c: returned tcp_sigpool for usage (uninitialized on failure) * - * Returns 0 on success, error otherwise. + * Returns: 0 on success, error otherwise. 
*/ int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c); /** diff --git a/include/net/tls.h b/include/net/tls.h index 61fef2880114..857340338b69 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -59,6 +59,8 @@ struct tls_rec; #define TLS_CRYPTO_INFO_READY(info) ((info)->cipher_type) +#define TLS_HANDSHAKE_KEYUPDATE 24 /* rfc8446 B.3: Key update */ + #define TLS_AAD_SPACE_SIZE 13 #define TLS_MAX_IV_SIZE 16 @@ -130,6 +132,7 @@ struct tls_sw_context_rx { u8 async_capable:1; u8 zc_capable:1; u8 reader_contended:1; + bool key_update_pending; struct tls_strparser strp; diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 33ba6fc151cf..2dd23ee2bacd 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -227,6 +227,7 @@ struct vxlan_config { unsigned int addrmax; bool no_share; enum ifla_vxlan_df df; + struct vxlanhdr reserved_bits; }; enum { diff --git a/include/net/xdp.h b/include/net/xdp.h index b5b10f2b88e5..4dafc5e021f1 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -11,6 +11,8 @@ #include <linux/netdevice.h> #include <linux/skbuff.h> /* skb_shared_info */ +#include <net/page_pool/types.h> + /** * DOC: XDP RX-queue information * @@ -87,7 +89,7 @@ struct xdp_buff { u32 flags; /* supported values defined in xdp_buff_flags */ }; -static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp) +static __always_inline bool xdp_buff_has_frags(const struct xdp_buff *xdp) { return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS); } @@ -102,7 +104,8 @@ static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp) xdp->flags &= ~XDP_FLAGS_HAS_FRAGS; } -static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp) +static __always_inline bool +xdp_buff_is_frag_pfmemalloc(const struct xdp_buff *xdp) { return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC); } @@ -143,15 +146,16 @@ xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start, SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) static inline struct skb_shared_info * -xdp_get_shared_info_from_buff(struct xdp_buff *xdp) +xdp_get_shared_info_from_buff(const struct xdp_buff *xdp) { return (struct skb_shared_info *)xdp_data_hard_end(xdp); } -static __always_inline unsigned int xdp_get_buff_len(struct xdp_buff *xdp) +static __always_inline unsigned int +xdp_get_buff_len(const struct xdp_buff *xdp) { unsigned int len = xdp->data_end - xdp->data; - struct skb_shared_info *sinfo; + const struct skb_shared_info *sinfo; if (likely(!xdp_buff_has_frags(xdp))) goto out; @@ -162,26 +166,114 @@ out: return len; } +void xdp_return_frag(netmem_ref netmem, const struct xdp_buff *xdp); + +/** + * __xdp_buff_add_frag - attach frag to &xdp_buff + * @xdp: XDP buffer to attach the frag to + * @netmem: network memory containing the frag + * @offset: offset at which the frag starts + * @size: size of the frag + * @truesize: total memory size occupied by the frag + * @try_coalesce: whether to try coalescing the frags (not valid for XSk) + * + * Attach frag to the XDP buffer. If it currently has no frags attached, + * initialize the related fields, otherwise check that the frag number + * didn't reach the limit of ``MAX_SKB_FRAGS``. If possible, try coalescing + * the frag with the previous one. + * The function doesn't check/update the pfmemalloc bit. Please use the + * non-underscored wrapper in drivers. + * + * Return: true on success, false if there's no space for the frag in + * the shared info struct. 
+ */ +static inline bool __xdp_buff_add_frag(struct xdp_buff *xdp, netmem_ref netmem, + u32 offset, u32 size, u32 truesize, + bool try_coalesce) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + skb_frag_t *prev; + u32 nr_frags; + + if (!xdp_buff_has_frags(xdp)) { + xdp_buff_set_frags_flag(xdp); + + nr_frags = 0; + sinfo->xdp_frags_size = 0; + sinfo->xdp_frags_truesize = 0; + + goto fill; + } + + nr_frags = sinfo->nr_frags; + prev = &sinfo->frags[nr_frags - 1]; + + if (try_coalesce && netmem == skb_frag_netmem(prev) && + offset == skb_frag_off(prev) + skb_frag_size(prev)) { + skb_frag_size_add(prev, size); + /* Guaranteed to only decrement the refcount */ + xdp_return_frag(netmem, xdp); + } else if (unlikely(nr_frags == MAX_SKB_FRAGS)) { + return false; + } else { +fill: + __skb_fill_netmem_desc_noacc(sinfo, nr_frags++, netmem, + offset, size); + } + + sinfo->nr_frags = nr_frags; + sinfo->xdp_frags_size += size; + sinfo->xdp_frags_truesize += truesize; + + return true; +} + +/** + * xdp_buff_add_frag - attach frag to &xdp_buff + * @xdp: XDP buffer to attach the frag to + * @netmem: network memory containing the frag + * @offset: offset at which the frag starts + * @size: size of the frag + * @truesize: total memory size occupied by the frag + * + * Version of __xdp_buff_add_frag() which takes care of the pfmemalloc bit. + * + * Return: true on success, false if there's no space for the frag in + * the shared info struct. + */ +static inline bool xdp_buff_add_frag(struct xdp_buff *xdp, netmem_ref netmem, + u32 offset, u32 size, u32 truesize) +{ + if (!__xdp_buff_add_frag(xdp, netmem, offset, size, truesize, true)) + return false; + + if (unlikely(netmem_is_pfmemalloc(netmem))) + xdp_buff_set_frag_pfmemalloc(xdp); + + return true; +} + struct xdp_frame { void *data; - u16 len; - u16 headroom; + u32 len; + u32 headroom; u32 metasize; /* uses lower 8-bits */ /* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time, - * while mem info is valid on remote CPU. + * while mem_type is valid on remote CPU. 
*/ - struct xdp_mem_info mem; + enum xdp_mem_type mem_type:32; struct net_device *dev_rx; /* used by cpumap */ u32 frame_sz; u32 flags; /* supported values defined in xdp_buff_flags */ }; -static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame) +static __always_inline bool xdp_frame_has_frags(const struct xdp_frame *frame) { return !!(frame->flags & XDP_FLAGS_HAS_FRAGS); } -static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame) +static __always_inline bool +xdp_frame_is_frag_pfmemalloc(const struct xdp_frame *frame) { return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC); } @@ -189,18 +281,16 @@ static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame #define XDP_BULK_QUEUE_SIZE 16 struct xdp_frame_bulk { int count; - void *xa; - void *q[XDP_BULK_QUEUE_SIZE]; + netmem_ref q[XDP_BULK_QUEUE_SIZE]; }; static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq) { - /* bq->count will be zero'ed when bq->xa gets updated */ - bq->xa = NULL; + bq->count = 0; } static inline struct skb_shared_info * -xdp_get_shared_info_from_frame(struct xdp_frame *frame) +xdp_get_shared_info_from_frame(const struct xdp_frame *frame) { void *data_hard_start = frame->data - frame->headroom - sizeof(*frame); @@ -226,7 +316,14 @@ xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags, unsigned int size, unsigned int truesize, bool pfmemalloc) { - skb_shinfo(skb)->nr_frags = nr_frags; + struct skb_shared_info *sinfo = skb_shinfo(skb); + + sinfo->nr_frags = nr_frags; + /* + * ``destructor_arg`` is unionized with ``xdp_frags_{,true}size``, + * so reset it here, once those fields are no longer needed. + */ + sinfo->destructor_arg = NULL; skb->len += size; skb->data_len += size; @@ -238,6 +335,8 @@ xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags, void xdp_warn(const char *msg, const char *func, const int line); #define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__) +struct sk_buff *xdp_build_skb_from_buff(const struct xdp_buff *xdp); +struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp); struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp); struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf, struct sk_buff *skb, @@ -248,7 +347,8 @@ int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp); struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf); static inline -void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp) +void xdp_convert_frame_to_buff(const struct xdp_frame *frame, + struct xdp_buff *xdp) { xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame); xdp->data = frame->data; @@ -259,7 +359,7 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp) static inline -int xdp_update_frame_from_buff(struct xdp_buff *xdp, +int xdp_update_frame_from_buff(const struct xdp_buff *xdp, struct xdp_frame *xdp_frame) { int metasize, headroom; @@ -301,24 +401,33 @@ struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp) if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0)) return NULL; - /* rxq only valid until napi_schedule ends, convert to xdp_mem_type */ - xdp_frame->mem = xdp->rxq->mem; + /* rxq only valid until napi_schedule ends, convert to xdp_mem_type */ + xdp_frame->mem_type = xdp->rxq->mem.type; return xdp_frame; } -void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, - struct xdp_buff *xdp); +void __xdp_return(netmem_ref netmem, enum xdp_mem_type mem_type, + bool napi_direct, struct xdp_buff
*xdp); void xdp_return_frame(struct xdp_frame *xdpf); void xdp_return_frame_rx_napi(struct xdp_frame *xdpf); void xdp_return_buff(struct xdp_buff *xdp); -void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq); void xdp_return_frame_bulk(struct xdp_frame *xdpf, struct xdp_frame_bulk *bq); -static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf) +static inline void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq) +{ + if (unlikely(!bq->count)) + return; + + page_pool_put_netmem_bulk(bq->q, bq->count); + bq->count = 0; +} + +static __always_inline unsigned int +xdp_get_frame_len(const struct xdp_frame *xdpf) { - struct skb_shared_info *sinfo; + const struct skb_shared_info *sinfo; unsigned int len = xdpf->len; if (likely(!xdp_frame_has_frags(xdpf))) @@ -350,6 +459,38 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq); int xdp_reg_mem_model(struct xdp_mem_info *mem, enum xdp_mem_type type, void *allocator); void xdp_unreg_mem_model(struct xdp_mem_info *mem); +int xdp_reg_page_pool(struct page_pool *pool); +void xdp_unreg_page_pool(const struct page_pool *pool); +void xdp_rxq_info_attach_page_pool(struct xdp_rxq_info *xdp_rxq, + const struct page_pool *pool); + +/** + * xdp_rxq_info_attach_mem_model - attach registered mem info to RxQ info + * @xdp_rxq: XDP RxQ info to attach the memory info to + * @mem: already registered memory info + * + * If the driver registers its memory providers manually, it must use this + * function instead of xdp_rxq_info_reg_mem_model(). + */ +static inline void +xdp_rxq_info_attach_mem_model(struct xdp_rxq_info *xdp_rxq, + const struct xdp_mem_info *mem) +{ + xdp_rxq->mem = *mem; +} + +/** + * xdp_rxq_info_detach_mem_model - detach registered mem info from RxQ info + * @xdp_rxq: XDP RxQ info to detach the memory info from + * + * If the driver registers its memory providers manually and then attaches it + * via xdp_rxq_info_attach_mem_model(), it must call this function before + * xdp_rxq_info_unreg(). + */ +static inline void xdp_rxq_info_detach_mem_model(struct xdp_rxq_info *xdp_rxq) +{ + xdp_rxq->mem = (struct xdp_mem_info){ }; +} /* Drivers not supporting XDP metadata can use this helper, which * rejects any room expansion for metadata as a result. 
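
The xdp.h changes above shift the driver-facing contract in two visible ways: multi-buffer state is now accumulated on the xdp_buff via xdp_buff_add_frag(), and bulk recycling goes straight to the page_pool via page_pool_put_netmem_bulk(). A minimal sketch of an Rx-path helper built on these APIs follows; it assumes a page_pool-backed queue, and the helper name rx_append_frag() plus its reduced error handling are illustrative only, not part of this patch set:

#include <net/page_pool/helpers.h>
#include <net/xdp.h>

/* Illustrative only: pull one fragment from the queue's page_pool and
 * attach it to an in-flight multi-buffer xdp_buff.
 */
static bool rx_append_frag(struct page_pool *pool, struct xdp_buff *xdp,
			   u32 len)
{
	unsigned int offset, size = len;
	netmem_ref netmem;

	/* Picks a whole page or a frag depending on the requested size;
	 * *size may grow to absorb the leftover tail space, which makes
	 * it the natural truesize for the frag.
	 */
	netmem = page_pool_dev_alloc_netmem(pool, &offset, &size);
	if (!netmem)
		return false;

	/* Initializes the frags state on first use, coalesces with the
	 * previous frag when possible, propagates the pfmemalloc bit and
	 * fails only once MAX_SKB_FRAGS is reached.
	 */
	if (!xdp_buff_add_frag(xdp, netmem, offset, len, size)) {
		/* Rx/NAPI context, so direct recycling is safe here */
		page_pool_put_full_netmem(pool, netmem, true);
		return false;
	}

	return true;
}

Once the buffer is complete, the new xdp_build_skb_from_buff() declared above can convert it to an skb in one step. Note that the XSk variant xsk_buff_add_frag(), reworked in the xdp_sock_drv.h diff that follows, now returns bool as well and must be checked the same way.
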
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h index 7a7316d9c0da..784cd34f5bba 100644 --- a/include/net/xdp_sock_drv.h +++ b/include/net/xdp_sock_drv.h @@ -92,7 +92,7 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool) return xp_alloc(pool); } -static inline bool xsk_is_eop_desc(struct xdp_desc *desc) +static inline bool xsk_is_eop_desc(const struct xdp_desc *desc) { return !xp_mb_desc(desc); } @@ -127,14 +127,24 @@ out: xp_free(xskb); } -static inline void xsk_buff_add_frag(struct xdp_buff *xdp) +static inline bool xsk_buff_add_frag(struct xdp_buff *head, + struct xdp_buff *xdp) { - struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp); + const void *data = xdp->data; + struct xdp_buff_xsk *frag; + + if (!__xdp_buff_add_frag(head, virt_to_netmem(data), + offset_in_page(data), xdp->data_end - data, + xdp->frame_sz, false)) + return false; + frag = container_of(xdp, struct xdp_buff_xsk, xdp); list_add_tail(&frag->list_node, &frag->pool->xskb_list); + + return true; } -static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first) +static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first) { struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp); struct xdp_buff *ret = NULL; @@ -191,7 +201,8 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr) XDP_TXMD_FLAGS_CHECKSUM | \ 0) -static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta) +static inline bool +xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta) { return !(meta->flags & ~XDP_TXMD_FLAGS_VALID); } @@ -323,7 +334,7 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool) return NULL; } -static inline bool xsk_is_eop_desc(struct xdp_desc *desc) +static inline bool xsk_is_eop_desc(const struct xdp_desc *desc) { return false; } @@ -342,11 +353,13 @@ static inline void xsk_buff_free(struct xdp_buff *xdp) { } -static inline void xsk_buff_add_frag(struct xdp_buff *xdp) +static inline bool xsk_buff_add_frag(struct xdp_buff *head, + struct xdp_buff *xdp) { + return false; } -static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first) +static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first) { return NULL; } diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 32c09e85a64c..4b0677e48190 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -38,6 +38,7 @@ #define XFRM_PROTO_COMP 108 #define XFRM_PROTO_IPIP 4 #define XFRM_PROTO_IPV6 41 +#define XFRM_PROTO_IPTFS IPPROTO_AGGFRAG #define XFRM_PROTO_ROUTING IPPROTO_ROUTING #define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS @@ -213,6 +214,7 @@ struct xfrm_state { u16 family; xfrm_address_t saddr; int header_len; + int enc_hdr_len; int trailer_len; u32 extra_flags; struct xfrm_mark smark; @@ -303,6 +305,9 @@ struct xfrm_state { * interpreted by xfrm_type methods. 
*/ void *data; u8 dir; + + const struct xfrm_mode_cbs *mode_cbs; + void *mode_data; }; static inline struct net *xs_net(struct xfrm_state *x) @@ -460,6 +465,45 @@ struct xfrm_type_offload { int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family); void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family); +/** + * struct xfrm_mode_cbs - XFRM mode callbacks + * @owner: module owner or NULL + * @init_state: Add/init mode specific state in `xfrm_state *x` + * @clone_state: Copy mode specific values from `orig` to new state `x` + * @destroy_state: Cleanup mode specific state from `xfrm_state *x` + * @user_init: Process mode specific netlink attributes from user + * @copy_to_user: Add netlink attributes to `attrs` based on state in `x` + * @sa_len: Return space required to store mode specific netlink attributes + * @get_inner_mtu: Return avail payload space after removing encap overhead + * @input: Process received packet from SA using mode + * @output: Output given packet using mode + * @prepare_output: Add mode specific encapsulation to packet in skb. On return + * `transport_header` should point at ESP header, `network_header` should + * point at outer IP header and `mac_header` should point at the + * protocol/nexthdr field of the outer IP. + * + * One should examine and understand the specific uses of these callbacks in + * xfrm for further detail on how and when these functions are called. RTSL. + */ +struct xfrm_mode_cbs { + struct module *owner; + int (*init_state)(struct xfrm_state *x); + int (*clone_state)(struct xfrm_state *x, struct xfrm_state *orig); + void (*destroy_state)(struct xfrm_state *x); + int (*user_init)(struct net *net, struct xfrm_state *x, + struct nlattr **attrs, + struct netlink_ext_ack *extack); + int (*copy_to_user)(struct xfrm_state *x, struct sk_buff *skb); + unsigned int (*sa_len)(const struct xfrm_state *x); + u32 (*get_inner_mtu)(struct xfrm_state *x, int outer_mtu); + int (*input)(struct xfrm_state *x, struct sk_buff *skb); + int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb); + int (*prepare_output)(struct xfrm_state *x, struct sk_buff *skb); +}; + +int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs); +void xfrm_unregister_mode_cbs(u8 mode); + static inline int xfrm_af2proto(unsigned int family) { switch(family) { diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index bb03cee716b3..50779406bc2d 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -29,7 +29,7 @@ struct xdp_buff_xsk { dma_addr_t frame_dma; struct xsk_buff_pool *pool; struct list_head list_node; -}; +} __aligned_largest; #define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb)) #define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t)) @@ -183,7 +183,7 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool, !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK); } -static inline bool xp_mb_desc(struct xdp_desc *desc) +static inline bool xp_mb_desc(const struct xdp_desc *desc) { return desc->options & XDP_PKT_CONTD; } diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h index 226ae3702d8a..2bf09b594d10 100644 --- a/include/rdma/ib_cache.h +++ b/include/rdma/ib_cache.h @@ -64,22 +64,6 @@ int ib_find_cached_pkey(struct ib_device *device, u16 *index); /** - * ib_find_exact_cached_pkey - Returns the PKey table index where a
specified - * PKey value occurs. Comparison uses the FULL 16 bits (incl membership bit) - * @device: The device to query. - * @port_num: The port number of the device to search for the PKey. - * @pkey: The PKey value to search for. - * @index: The index into the cached PKey table where the PKey was found. - * - * ib_find_exact_cached_pkey() searches the specified PKey table in - * the local software cache. - */ -int ib_find_exact_cached_pkey(struct ib_device *device, - u32 port_num, - u16 pkey, - u16 *index); - -/** * ib_get_cached_lmc - Returns a cached lmc table entry * @device: The device to query. * @port_num: The port number of the device to query. diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h index 1838869aad28..b179e464e3d1 100644 --- a/include/rdma/ib_marshall.h +++ b/include/rdma/ib_marshall.h @@ -22,7 +22,4 @@ void ib_copy_ah_attr_to_user(struct ib_device *device, void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, struct sa_path_rec *src); -void ib_copy_path_rec_from_user(struct sa_path_rec *dst, - struct ib_user_path_rec *src); - #endif /* IB_USER_MARSHALL_H */ diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h index b8c56d7dc35d..8266fab826a7 100644 --- a/include/rdma/ib_pack.h +++ b/include/rdma/ib_pack.h @@ -283,7 +283,4 @@ int ib_ud_header_init(int payload_bytes, int ib_ud_header_pack(struct ib_ud_header *header, void *buf); -int ib_ud_header_unpack(void *buf, - struct ib_ud_header *header); - #endif /* IB_PACK_H */ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 3417636da960..0ad104dae253 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -59,9 +59,6 @@ extern struct workqueue_struct *ib_comp_unbound_wq; struct ib_ucq_object; -__printf(3, 4) __cold -void ibdev_printk(const char *level, const struct ib_device *ibdev, - const char *format, ...); __printf(2, 3) __cold void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...); __printf(2, 3) __cold @@ -2177,6 +2174,7 @@ struct ib_port_cache { struct ib_gid_table *gid; u8 lmc; enum ib_port_state port_state; + enum ib_port_state last_port_state; }; struct ib_port_immutable { @@ -2256,7 +2254,9 @@ struct rdma_netdev_alloc_params { struct ib_odp_counters { atomic64_t faults; + atomic64_t faults_handled; atomic64_t invalidations; + atomic64_t invalidations_handled; atomic64_t prefetch; }; @@ -2681,6 +2681,13 @@ struct ib_device_ops { */ void (*ufile_hw_cleanup)(struct ib_uverbs_file *ufile); + /** + * report_port_event - Drivers need to implement this if they have + * some private stuff to handle when link status changes. + */ + void (*report_port_event)(struct ib_device *ibdev, + struct net_device *ndev, unsigned long event); + DECLARE_RDMA_OBJ_SIZE(ib_ah); DECLARE_RDMA_OBJ_SIZE(ib_counters); DECLARE_RDMA_OBJ_SIZE(ib_cq); @@ -4469,6 +4476,17 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, unsigned int port); struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, u32 port); +int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev, + u32 *port); + +static inline enum ib_port_state ib_get_curr_port_state(struct net_device *net_dev) +{ + return (netif_running(net_dev) && netif_carrier_ok(net_dev)) ? 
+ IB_PORT_ACTIVE : IB_PORT_DOWN; +} + +void ib_dispatch_port_state_event(struct ib_device *ibdev, + struct net_device *ndev); struct ib_wq *ib_create_wq(struct ib_pd *pd, struct ib_wq_init_attr *init_attr); int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata); diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h index 9705b2a98e49..510c88bfabd4 100644 --- a/include/rv/da_monitor.h +++ b/include/rv/da_monitor.h @@ -14,6 +14,7 @@ #include <rv/automata.h> #include <linux/rv.h> #include <linux/bug.h> +#include <linux/sched.h> #ifdef CONFIG_RV_REACTORS @@ -324,10 +325,13 @@ static inline struct da_monitor *da_get_monitor_##name(struct task_struct *tsk) static void da_monitor_reset_all_##name(void) \ { \ struct task_struct *g, *p; \ + int cpu; \ \ read_lock(&tasklist_lock); \ for_each_process_thread(g, p) \ da_monitor_reset_##name(da_get_monitor_##name(p)); \ + for_each_present_cpu(cpu) \ + da_monitor_reset_##name(da_get_monitor_##name(idle_task(cpu))); \ read_unlock(&tasklist_lock); \ } \ \ diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 4a9b4169e081..183d9fd50d2d 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -963,7 +963,7 @@ int fc_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); int fc_eh_abort(struct scsi_cmnd *); int fc_eh_device_reset(struct scsi_cmnd *); int fc_eh_host_reset(struct scsi_cmnd *); -int fc_slave_alloc(struct scsi_device *); +int fc_sdev_init(struct scsi_device *); /* * ELS/CT interface diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 1324068dd950..ba460b6c0374 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -683,8 +683,7 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset); int sas_phy_enable(struct sas_phy *phy, int enable); extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); extern int sas_target_alloc(struct scsi_target *); -int sas_device_configure(struct scsi_device *dev, - struct queue_limits *lim); +int sas_sdev_configure(struct scsi_device *dev, struct queue_limits *lim); extern int sas_change_queue_depth(struct scsi_device *, int new_depth); extern int sas_bios_param(struct scsi_device *, struct block_device *, sector_t capacity, int *hsc); @@ -703,7 +702,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd); int sas_eh_target_reset_handler(struct scsi_cmnd *cmd); extern void sas_target_destroy(struct scsi_target *); -extern int sas_slave_alloc(struct scsi_device *); +extern int sas_sdev_init(struct scsi_device *); extern int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg); extern int sas_drain_work(struct sas_ha_struct *ha); @@ -750,8 +749,8 @@ void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event, #endif #define LIBSAS_SHT_BASE _LIBSAS_SHT_BASE \ - .device_configure = sas_device_configure, \ - .slave_alloc = sas_slave_alloc, \ + .sdev_configure = sas_sdev_configure, \ + .sdev_init = sas_sdev_init, \ #define LIBSAS_SHT_BASE_NO_SLAVE_INIT _LIBSAS_SHT_BASE diff --git a/include/scsi/scsi_bsg_iscsi.h b/include/scsi/scsi_bsg_iscsi.h index 9b1f0f424a79..a569c35b258d 100644 --- a/include/scsi/scsi_bsg_iscsi.h +++ b/include/scsi/scsi_bsg_iscsi.h @@ -59,7 +59,7 @@ struct iscsi_bsg_host_vendor { */ struct iscsi_bsg_host_vendor_reply { /* start of vendor response area */ - uint32_t vendor_rsp[0]; + DECLARE_FLEX_ARRAY(uint32_t, vendor_rsp); }; diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 9c540f5468eb..7acd0ec82bb0 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h 
@@ -155,7 +155,7 @@ struct scsi_device { blist_flags_t sdev_bflags; /* black/white flags as also found in * scsi_devinfo.[hc]. For now used only to - * pass settings from slave_alloc to scsi + * pass settings from sdev_init to scsi * core. */ unsigned int eh_timeout; /* Error handling timeout */ @@ -357,7 +357,7 @@ struct scsi_target { atomic_t target_blocked; /* - * LLDs should set this in the slave_alloc host template callout. + * LLDs should set this in the sdev_init host template callout. * If set to zero then there is no limit. */ unsigned int can_queue; diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 2b4ab0369ffb..26bc23419cfd 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -168,20 +168,20 @@ struct scsi_host_template { * Return values: 0 on success, non-0 on failure * * Deallocation: If we didn't find any devices at this ID, you will - * get an immediate call to slave_destroy(). If we find something - * here then you will get a call to slave_configure(), then the + * get an immediate call to sdev_destroy(). If we find something + * here then you will get a call to sdev_configure(), then the * device will be used for however long it is kept around, then when * the device is removed from the system (or * possibly at reboot - * time), you will then get a call to slave_destroy(). This is - * assuming you implement slave_configure and slave_destroy. + * time), you will then get a call to sdev_destroy(). This is + * assuming you implement sdev_configure and sdev_destroy. * However, if you allocate memory and hang it off the device struct, - * then you must implement the slave_destroy() routine at a minimum + * then you must implement the sdev_destroy() routine at a minimum * in order to avoid leaking memory * each time a device is torn down. * * Status: OPTIONAL */ - int (* slave_alloc)(struct scsi_device *); + int (* sdev_init)(struct scsi_device *); /* * Once the device has responded to an INQUIRY and we know the @@ -206,28 +206,24 @@ struct scsi_host_template { * specific setup basis... * 6. Return 0 on success, non-0 on error. The device will be marked * as offline on error so that no access will occur. If you return - * non-0, your slave_destroy routine will never get called for this + * non-0, your sdev_destroy routine will never get called for this * device, so don't leave any loose memory hanging around, clean * up after yourself before returning non-0 * * Status: OPTIONAL - * - * Note: slave_configure is the legacy version, use device_configure for - * all new code. A driver must never define both. */ - int (* device_configure)(struct scsi_device *, struct queue_limits *lim); - int (* slave_configure)(struct scsi_device *); + int (* sdev_configure)(struct scsi_device *, struct queue_limits *lim); /* * Immediately prior to deallocating the device and after all activity * has ceased the mid layer calls this point so that the low level * driver may completely detach itself from the scsi device and vice * versa. The low level driver is responsible for freeing any memory - * it allocated in the slave_alloc or slave_configure calls. + * it allocated in the sdev_init or sdev_configure calls.
* * Status: OPTIONAL */ - void (* slave_destroy)(struct scsi_device *); + void (* sdev_destroy)(struct scsi_device *); /* * Before the mid layer attempts to scan for a new device attached @@ -438,8 +434,10 @@ struct scsi_host_template { */ short cmd_per_lun; - /* If use block layer to manage tags, this is tag allocation policy */ - int tag_alloc_policy; + /* + * Allocate tags starting from last allocated tag. + */ + bool tag_alloc_policy_rr : 1; /* * Track QUEUE_FULL events and reduce queue depth on demand. @@ -599,7 +597,7 @@ struct Scsi_Host { * have some way of identifying each detected host adapter properly * and uniquely. For hosts that do not support more than one card * in the system at one time, this does not need to be set. It is - * initialized to 0 in scsi_register. + * initialized to 0 in scsi_host_alloc. */ unsigned int unique_id; diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index bd1243657c01..5474494a1e99 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -447,10 +447,6 @@ extern int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id); extern int iscsi_session_event(struct iscsi_cls_session *session, enum iscsi_uevent_e event); -extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost, - struct iscsi_transport *t, - int dd_size, - unsigned int target_id); extern void iscsi_force_destroy_session(struct iscsi_cls_session *session); extern void iscsi_remove_session(struct iscsi_cls_session *session); extern void iscsi_free_session(struct iscsi_cls_session *session); diff --git a/include/soc/amlogic/reset-meson-aux.h b/include/soc/amlogic/reset-meson-aux.h deleted file mode 100644 index d8a15d48c984..000000000000 --- a/include/soc/amlogic/reset-meson-aux.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __SOC_RESET_MESON_AUX_H -#define __SOC_RESET_MESON_AUX_H - -#include <linux/err.h> - -struct device; -struct regmap; - -#if IS_ENABLED(CONFIG_RESET_MESON_AUX) -int devm_meson_rst_aux_register(struct device *dev, - struct regmap *map, - const char *adev_name); -#else -static inline int devm_meson_rst_aux_register(struct device *dev, - struct regmap *map, - const char *adev_name) -{ - return 0; -} -#endif - -#endif /* __SOC_RESET_MESON_AUX_H */ diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h index 73cac8d0287e..e1f87fbfe554 100644 --- a/include/soc/bcm2835/raspberrypi-firmware.h +++ b/include/soc/bcm2835/raspberrypi-firmware.h @@ -152,6 +152,7 @@ enum rpi_firmware_clk_id { RPI_FIRMWARE_M2MC_CLK_ID, RPI_FIRMWARE_PIXEL_BVB_CLK_ID, RPI_FIRMWARE_VEC_CLK_ID, + RPI_FIRMWARE_DISP_CLK_ID, RPI_FIRMWARE_NUM_CLK_ID, }; diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 2db9ae0575b6..6db7fc9dbaa4 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -759,6 +759,14 @@ struct ocelot_mm_state { u8 active_preemptible_tcs; }; +struct ocelot_ts_stats { + u64 pkts; + u64 onestep_pkts_unconfirmed; + u64 lost; + u64 err; + struct u64_stats_sync syncp; +}; + struct ocelot_port; struct ocelot_port { @@ -778,6 +786,7 @@ struct ocelot_port { phy_interface_t phy_mode; + struct ocelot_ts_stats *ts_stats; struct sk_buff_head tx_skbs; unsigned int trap_proto; @@ -1023,6 +1032,8 @@ void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port, struct ethtool_eth_mac_stats *mac_stats); void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port, 
struct ethtool_eth_phy_stats *phy_stats); +void ocelot_port_get_ts_stats(struct ocelot *ocelot, int port, + struct ethtool_ts_stats *ts_stats); int ocelot_get_ts_info(struct ocelot *ocelot, int port, struct kernel_ethtool_ts_info *info); void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs); diff --git a/include/soc/qcom/tcs.h b/include/soc/qcom/tcs.h index 3acca067c72b..cff67ce25488 100644 --- a/include/soc/qcom/tcs.h +++ b/include/soc/qcom/tcs.h @@ -6,6 +6,9 @@ #ifndef __SOC_QCOM_TCS_H__ #define __SOC_QCOM_TCS_H__ +#include <linux/bitfield.h> +#include <linux/bits.h> + #define MAX_RPMH_PAYLOAD 16 /** @@ -60,22 +63,17 @@ struct tcs_request { struct tcs_cmd *cmds; }; -#define BCM_TCS_CMD_COMMIT_SHFT 30 -#define BCM_TCS_CMD_COMMIT_MASK 0x40000000 -#define BCM_TCS_CMD_VALID_SHFT 29 -#define BCM_TCS_CMD_VALID_MASK 0x20000000 -#define BCM_TCS_CMD_VOTE_X_SHFT 14 -#define BCM_TCS_CMD_VOTE_MASK 0x3fff -#define BCM_TCS_CMD_VOTE_Y_SHFT 0 -#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000 +#define BCM_TCS_CMD_COMMIT_MASK BIT(30) +#define BCM_TCS_CMD_VALID_MASK BIT(29) +#define BCM_TCS_CMD_VOTE_MASK GENMASK(13, 0) +#define BCM_TCS_CMD_VOTE_Y_MASK GENMASK(13, 0) +#define BCM_TCS_CMD_VOTE_X_MASK GENMASK(27, 14) /* Construct a Bus Clock Manager (BCM) specific TCS command */ #define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \ - (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \ - ((valid) << BCM_TCS_CMD_VALID_SHFT) | \ - ((cpu_to_le32(vote_x) & \ - BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \ - ((cpu_to_le32(vote_y) & \ - BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT)) + (u32_encode_bits(commit, BCM_TCS_CMD_COMMIT_MASK) | \ + u32_encode_bits(valid, BCM_TCS_CMD_VALID_MASK) | \ + u32_encode_bits(vote_x, BCM_TCS_CMD_VOTE_X_MASK) | \ + u32_encode_bits(vote_y, BCM_TCS_CMD_VOTE_Y_MASK)) #endif /* __SOC_QCOM_TCS_H__ */ diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h index 957295364a5e..4c7a40e149a5 100644 --- a/include/sound/hdaudio_ext.h +++ b/include/sound/hdaudio_ext.h @@ -2,8 +2,6 @@ #ifndef __SOUND_HDAUDIO_EXT_H #define __SOUND_HDAUDIO_EXT_H -#include <linux/io-64-nonatomic-lo-hi.h> -#include <linux/iopoll.h> #include <sound/hdaudio.h> int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev, @@ -119,49 +117,6 @@ int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *hlink) void snd_hdac_ext_bus_link_power(struct hdac_device *codec, bool enable); -#define snd_hdac_adsp_writeb(chip, reg, value) \ - snd_hdac_reg_writeb(chip, (chip)->dsp_ba + (reg), value) -#define snd_hdac_adsp_readb(chip, reg) \ - snd_hdac_reg_readb(chip, (chip)->dsp_ba + (reg)) -#define snd_hdac_adsp_writew(chip, reg, value) \ - snd_hdac_reg_writew(chip, (chip)->dsp_ba + (reg), value) -#define snd_hdac_adsp_readw(chip, reg) \ - snd_hdac_reg_readw(chip, (chip)->dsp_ba + (reg)) -#define snd_hdac_adsp_writel(chip, reg, value) \ - snd_hdac_reg_writel(chip, (chip)->dsp_ba + (reg), value) -#define snd_hdac_adsp_readl(chip, reg) \ - snd_hdac_reg_readl(chip, (chip)->dsp_ba + (reg)) -#define snd_hdac_adsp_writeq(chip, reg, value) \ - snd_hdac_reg_writeq(chip, (chip)->dsp_ba + (reg), value) -#define snd_hdac_adsp_readq(chip, reg) \ - snd_hdac_reg_readq(chip, (chip)->dsp_ba + (reg)) - -#define snd_hdac_adsp_updateb(chip, reg, mask, val) \ - snd_hdac_adsp_writeb(chip, reg, \ - (snd_hdac_adsp_readb(chip, reg) & ~(mask)) | (val)) -#define snd_hdac_adsp_updatew(chip, reg, mask, val) \ - snd_hdac_adsp_writew(chip, reg, \ - (snd_hdac_adsp_readw(chip, reg) & ~(mask)) | (val)) -#define 
snd_hdac_adsp_updatel(chip, reg, mask, val) \ - snd_hdac_adsp_writel(chip, reg, \ - (snd_hdac_adsp_readl(chip, reg) & ~(mask)) | (val)) -#define snd_hdac_adsp_updateq(chip, reg, mask, val) \ - snd_hdac_adsp_writeq(chip, reg, \ - (snd_hdac_adsp_readq(chip, reg) & ~(mask)) | (val)) - -#define snd_hdac_adsp_readb_poll(chip, reg, val, cond, delay_us, timeout_us) \ - readb_poll_timeout((chip)->dsp_ba + (reg), val, cond, \ - delay_us, timeout_us) -#define snd_hdac_adsp_readw_poll(chip, reg, val, cond, delay_us, timeout_us) \ - readw_poll_timeout((chip)->dsp_ba + (reg), val, cond, \ - delay_us, timeout_us) -#define snd_hdac_adsp_readl_poll(chip, reg, val, cond, delay_us, timeout_us) \ - readl_poll_timeout((chip)->dsp_ba + (reg), val, cond, \ - delay_us, timeout_us) -#define snd_hdac_adsp_readq_poll(chip, reg, val, cond, delay_us, timeout_us) \ - readq_poll_timeout((chip)->dsp_ba + (reg), val, cond, \ - delay_us, timeout_us) - struct hdac_ext_device; /* ops common to all codec drivers */ diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h index 5e1a9eafd10f..b220072cfa1b 100644 --- a/include/sound/hdmi-codec.h +++ b/include/sound/hdmi-codec.h @@ -105,7 +105,8 @@ struct hdmi_codec_ops { * Optional */ int (*get_dai_id)(struct snd_soc_component *component, - struct device_node *endpoint); + struct device_node *endpoint, + void *data); /* * Hook callback function to handle connector plug event. @@ -114,9 +115,6 @@ struct hdmi_codec_ops { int (*hook_plugged_cb)(struct device *dev, void *data, hdmi_codec_plugged_cb fn, struct device *codec_dev); - - /* bit field */ - unsigned int no_capture_mute:1; }; /* HDMI codec initialization data */ struct hdmi_codec_pdata { @@ -128,6 +126,7 @@ struct hdmi_codec_pdata { uint spdif:1; uint no_spdif_playback:1; uint no_spdif_capture:1; + uint no_capture_mute:1; int max_i2s_channels; void *data; }; diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 67c99ffbf51b..8becb4504887 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -1532,9 +1532,10 @@ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format) dev_dbg((pcm)->card->dev, fmt, ##args) /* helpers for copying between iov_iter and iomem */ -int copy_to_iter_fromio(struct iov_iter *itert, const void __iomem *src, - size_t count); -int copy_from_iter_toio(void __iomem *dst, struct iov_iter *iter, size_t count); +size_t copy_to_iter_fromio(const void __iomem *src, size_t bytes, + struct iov_iter *iter) __must_check; +size_t copy_from_iter_toio(void __iomem *dst, size_t bytes, + struct iov_iter *iter) __must_check; struct snd_pcm_status64 { snd_pcm_state_t state; /* stream state */ diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index f31cabf0158c..6916f7133597 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -89,6 +89,7 @@ struct snd_rawmidi_substream { unsigned int framing; /* whether to frame input data */ unsigned int clock_type; /* clock source to use for input framing */ int use_count; /* use counter (for output) */ + bool inactive; /* inactive substream (for UMP legacy) */ size_t bytes; spinlock_t lock; struct snd_rawmidi *rmidi; @@ -118,6 +119,7 @@ struct snd_rawmidi { struct list_head list; unsigned int device; /* device number */ unsigned int info_flags; /* SNDRV_RAWMIDI_INFO_XXXX */ + unsigned int tied_device; char id[64]; char name[80]; @@ -189,4 +191,13 @@ long snd_rawmidi_kernel_read(struct snd_rawmidi_substream *substream, long snd_rawmidi_kernel_write(struct snd_rawmidi_substream *substream, const unsigned char *buf, long count); +/* set up the tied
devices */ +static inline void snd_rawmidi_tie_devices(struct snd_rawmidi *r1, + struct snd_rawmidi *r2) +{ + /* tied_device field keeps the device+1 (so that 0 means no tied device) */ + r1->tied_device = r2->device + 1; + r2->tied_device = r1->device + 1; +} + #endif /* __SOUND_RAWMIDI_H */ diff --git a/include/sound/sdca.h b/include/sound/sdca.h index 7e138229e8f3..973252d0adac 100644 --- a/include/sound/sdca.h +++ b/include/sound/sdca.h @@ -9,6 +9,9 @@ #ifndef __SDCA_H__ #define __SDCA_H__ +#include <linux/types.h> +#include <linux/kconfig.h> + struct sdw_slave; #define SDCA_MAX_FUNCTION_COUNT 8 @@ -20,9 +23,9 @@ struct sdw_slave; * @name: human-readable string */ struct sdca_function_desc { - u64 adr; - u32 type; const char *name; + u32 type; + u8 adr; }; /** diff --git a/include/sound/sdca_function.h b/include/sound/sdca_function.h index a01eec86b9a6..c051c17903e8 100644 --- a/include/sound/sdca_function.h +++ b/include/sound/sdca_function.h @@ -9,6 +9,8 @@ #ifndef __SDCA_FUNCTION_H__ #define __SDCA_FUNCTION_H__ +#include <linux/bits.h> + /* * SDCA Function Types from SDCA specification v1.0a Section 5.1.2 * all Function types not described are reserved @@ -40,16 +42,34 @@ enum sdca_function_type { #define SDCA_FUNCTION_TYPE_RJ_NAME "RJ" #define SDCA_FUNCTION_TYPE_SIMPLE_NAME "SimpleJack" #define SDCA_FUNCTION_TYPE_HID_NAME "HID" +#define SDCA_FUNCTION_TYPE_IMP_DEF_NAME "ImplementationDefined" enum sdca_entity0_controls { - SDCA_CONTROL_ENTITY_0_COMMIT_GROUP_MASK = 0x01, - SDCA_CONTROL_ENTITY_0_INTSTAT_CLEAR = 0x02, - SDCA_CONTROL_ENTITY_0_INT_ENABLE = 0x03, - SDCA_CONTROL_ENTITY_0_FUNCTION_SDCA_VERSION = 0x04, - SDCA_CONTROL_ENTITY_0_FUNCTION_TOPOLOGY = 0x05, - SDCA_CONTROL_ENTITY_0_FUNCTION_MANUFACTURER_ID = 0x06, - SDCA_CONTROL_ENTITY_0_FUNCTION_ID = 0x07, - SDCA_CONTROL_ENTITY_0_FUNCTION_VERSION = 0x08 + SDCA_CTL_ENTITY_0_COMMIT_GROUP_MASK = 0x01, + SDCA_CTL_ENTITY_0_FUNCTION_SDCA_VERSION = 0x04, + SDCA_CTL_ENTITY_0_FUNCTION_TYPE = 0x05, + SDCA_CTL_ENTITY_0_FUNCTION_MANUFACTURER_ID = 0x06, + SDCA_CTL_ENTITY_0_FUNCTION_ID = 0x07, + SDCA_CTL_ENTITY_0_FUNCTION_VERSION = 0x08, + SDCA_CTL_ENTITY_0_FUNCTION_EXTENSION_ID = 0x09, + SDCA_CTL_ENTITY_0_FUNCTION_EXTENSION_VERSION = 0x0A, + SDCA_CTL_ENTITY_0_FUNCTION_STATUS = 0x10, + SDCA_CTL_ENTITY_0_FUNCTION_ACTION = 0x11, + SDCA_CTL_ENTITY_0_MATCHING_GUID = 0x12, + SDCA_CTL_ENTITY_0_DEVICE_MANUFACTURER_ID = 0x2C, + SDCA_CTL_ENTITY_0_DEVICE_PART_ID = 0x2D, + SDCA_CTL_ENTITY_0_DEVICE_VERSION = 0x2E, + SDCA_CTL_ENTITY_0_DEVICE_SDCA_VERSION = 0x2F, + + /* Function Status Bits */ + SDCA_CTL_ENTITY_0_DEVICE_NEWLY_ATTACHED = BIT(0), + SDCA_CTL_ENTITY_0_INTS_DISABLED_ABNORMALLY = BIT(1), + SDCA_CTL_ENTITY_0_STREAMING_STOPPED_ABNORMALLY = BIT(2), + SDCA_CTL_ENTITY_0_FUNCTION_FAULT = BIT(3), + SDCA_CTL_ENTITY_0_UMP_SEQUENCE_FAULT = BIT(4), + SDCA_CTL_ENTITY_0_FUNCTION_NEEDS_INITIALIZATION = BIT(5), + SDCA_CTL_ENTITY_0_FUNCTION_HAS_BEEN_RESET = BIT(6), + SDCA_CTL_ENTITY_0_FUNCTION_BUSY = BIT(7), }; #endif diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h index 3360d9eab068..892f70532363 100644 --- a/include/sound/simple_card_utils.h +++ b/include/sound/simple_card_utils.h @@ -89,6 +89,13 @@ struct simple_util_priv { #define simple_props_to_dai_codec(props, i) ((props)->codec_dai + i) #define simple_props_to_codec_conf(props, i) ((props)->codec_conf + i) +/* has the same effect as simple_priv_to_props().
Preferred over + * simple_priv_to_props() when dealing with PCM runtime data as + * the ID stored in rtd->id may not be a valid array index. + */ +#define runtime_simple_priv_to_props(priv, rtd) \ + ((priv)->dai_props + ((rtd)->dai_link - (priv)->dai_link)) + #define for_each_prop_dlc_cpus(props, i, cpu) \ for ((i) = 0; \ ((i) < (props)->num.cpus) && \ @@ -264,9 +271,13 @@ static inline void simple_util_debug_info(struct simple_util_priv *priv) simple_util_debug_dai(priv, "codec", dai); if (link->name) - dev_dbg(dev, "dai name = %s\n", link->name); + dev_dbg(dev, "link name = %s\n", link->name); if (link->dai_fmt) - dev_dbg(dev, "dai format = %04x\n", link->dai_fmt); + dev_dbg(dev, "link format = %04x\n", link->dai_fmt); + if (link->playback_only) + dev_dbg(dev, "link has playback_only"); + if (link->capture_only) + dev_dbg(dev, "link has capture_only"); if (props->adata.convert_rate) dev_dbg(dev, "convert_rate = %d\n", props->adata.convert_rate); if (props->adata.convert_channels) diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index aab57c19f62b..a11501752637 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -193,6 +193,9 @@ int snd_soc_dai_set_channel_map(struct snd_soc_dai *dai, int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate); +int snd_soc_dai_prepare(struct snd_soc_dai *dai, + struct snd_pcm_substream *substream); + /* Digital Audio Interface mute */ int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute, int direction); diff --git a/include/sound/soc.h b/include/sound/soc.h index 4f5d411e3823..fcdb5adfcd5e 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -681,6 +681,17 @@ struct snd_soc_dai_link_component { struct device_node *of_node; const char *dai_name; const struct of_phandle_args *dai_args; + + /* + * Extra format = SND_SOC_DAIFMT_Bx_Fx + * + * [Note] it is Bx_Fx base, not CBx_CFx + * + * It will be used with dai_link->dai_fmt + * see + * snd_soc_runtime_set_dai_fmt() + */ + unsigned int ext_fmt; }; /* @@ -1118,7 +1129,6 @@ struct snd_soc_card { unsigned int instantiated:1; unsigned int topology_shortname_created:1; unsigned int fully_routed:1; - unsigned int disable_route_checks:1; unsigned int probed:1; unsigned int component_chaining:1; diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h index 0e82598e10af..36a4a1e1d8ca 100644 --- a/include/sound/soc_sdw_utils.h +++ b/include/sound/soc_sdw_utils.h @@ -224,6 +224,8 @@ int asoc_sdw_cs_amp_init(struct snd_soc_card *card, struct snd_soc_dai_link *dai_links, struct asoc_sdw_codec_info *info, bool playback); +int asoc_sdw_cs_spk_feedback_rtd_init(struct snd_soc_pcm_runtime *rtd, + struct snd_soc_dai *dai); /* MAXIM codec support */ int asoc_sdw_maxim_init(struct snd_soc_card *card, diff --git a/include/sound/ump.h b/include/sound/ump.h index 532c2c3ea28e..73f97f88e2ed 100644 --- a/include/sound/ump.h +++ b/include/sound/ump.h @@ -83,6 +83,7 @@ struct snd_ump_ops { struct snd_seq_ump_ops { void (*input_receive)(struct snd_ump_endpoint *ump, const u32 *data, int words); + int (*notify_ep_change)(struct snd_ump_endpoint *ump); int (*notify_fb_change)(struct snd_ump_endpoint *ump, struct snd_ump_block *fb); int (*switch_protocol)(struct snd_ump_endpoint *ump); diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 60af7c63b34e..51ca80abacf7 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -576,9 +576,6 @@ struct iscsit_conn { spinlock_t 
state_lock; spinlock_t login_timer_lock; spinlock_t login_worker_lock; - /* libcrypto RX and TX contexts for crc32c */ - struct ahash_request *conn_rx_hash; - struct ahash_request *conn_tx_hash; /* Used for scheduling TX and RX connection kthreads */ cpumask_var_t conn_cpumask; cpumask_var_t allowed_cpumask; diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index a0aed1a428a1..b0db89058c91 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -118,6 +118,8 @@ enum yfs_cm_operation { */ #define afs_call_traces \ EM(afs_call_trace_alloc, "ALLOC") \ + EM(afs_call_trace_async_abort, "ASYAB") \ + EM(afs_call_trace_async_kill, "ASYKL") \ EM(afs_call_trace_free, "FREE ") \ EM(afs_call_trace_get, "GET ") \ EM(afs_call_trace_put, "PUT ") \ @@ -168,12 +170,14 @@ enum yfs_cm_operation { #define afs_cell_traces \ EM(afs_cell_trace_alloc, "ALLOC ") \ EM(afs_cell_trace_free, "FREE ") \ + EM(afs_cell_trace_get_atcell, "GET atcell") \ EM(afs_cell_trace_get_queue_dns, "GET q-dns ") \ EM(afs_cell_trace_get_queue_manage, "GET q-mng ") \ EM(afs_cell_trace_get_queue_new, "GET q-new ") \ EM(afs_cell_trace_get_vol, "GET vol ") \ EM(afs_cell_trace_insert, "INSERT ") \ EM(afs_cell_trace_manage, "MANAGE ") \ + EM(afs_cell_trace_put_atcell, "PUT atcell") \ EM(afs_cell_trace_put_candidate, "PUT candid") \ EM(afs_cell_trace_put_destroy, "PUT destry") \ EM(afs_cell_trace_put_queue_work, "PUT q-work") \ @@ -323,6 +327,44 @@ enum yfs_cm_operation { EM(yfs_CB_TellMeAboutYourself, "YFSCB.TellMeAboutYourself") \ E_(yfs_CB_CallBack, "YFSCB.CallBack") +#define afs_cb_promise_traces \ + EM(afs_cb_promise_clear_cb_break, "CLEAR cb-break") \ + EM(afs_cb_promise_clear_rmdir, "CLEAR rmdir") \ + EM(afs_cb_promise_clear_rotate_server, "CLEAR rot-srv") \ + EM(afs_cb_promise_clear_server_change, "CLEAR srv-chg") \ + EM(afs_cb_promise_clear_vol_init_cb, "CLEAR vol-init-cb") \ + EM(afs_cb_promise_set_apply_cb, "SET apply-cb") \ + EM(afs_cb_promise_set_new_inode, "SET new-inode") \ + E_(afs_cb_promise_set_new_symlink, "SET new-symlink") + +#define afs_vnode_invalid_traces \ + EM(afs_vnode_invalid_trace_cb_ro_snapshot, "cb-ro-snapshot") \ + EM(afs_vnode_invalid_trace_cb_scrub, "cb-scrub") \ + EM(afs_vnode_invalid_trace_cb_v_break, "cb-v-break") \ + EM(afs_vnode_invalid_trace_expired, "expired") \ + EM(afs_vnode_invalid_trace_no_cb_promise, "no-cb-promise") \ + EM(afs_vnode_invalid_trace_vol_expired, "vol-expired") \ + EM(afs_vnode_invalid_trace_zap_data, "zap-data") \ + E_(afs_vnode_valid_trace, "valid") + +#define afs_dir_invalid_traces \ + EM(afs_dir_invalid_edit_add_bad_size, "edit-add-bad-size") \ + EM(afs_dir_invalid_edit_add_no_slots, "edit-add-no-slots") \ + EM(afs_dir_invalid_edit_add_too_many_blocks, "edit-add-too-many-blocks") \ + EM(afs_dir_invalid_edit_get_block, "edit-get-block") \ + EM(afs_dir_invalid_edit_mkdir, "edit-mkdir") \ + EM(afs_dir_invalid_edit_rem_bad_size, "edit-rem-bad-size") \ + EM(afs_dir_invalid_edit_rem_wrong_name, "edit-rem-wrong_name") \ + EM(afs_dir_invalid_edit_upd_bad_size, "edit-upd-bad-size") \ + EM(afs_dir_invalid_edit_upd_no_dd, "edit-upd-no-dotdot") \ + EM(afs_dir_invalid_dv_mismatch, "dv-mismatch") \ + EM(afs_dir_invalid_inval_folio, "inv-folio") \ + EM(afs_dir_invalid_iter_stale, "iter-stale") \ + EM(afs_dir_invalid_reclaimed_folio, "reclaimed-folio") \ + EM(afs_dir_invalid_release_folio, "rel-folio") \ + EM(afs_dir_invalid_remote, "remote") \ + E_(afs_dir_invalid_subdir_removed, "subdir-removed") + #define afs_edit_dir_ops \ EM(afs_edit_dir_create, 
"create") \ EM(afs_edit_dir_create_error, "c_fail") \ @@ -332,6 +374,7 @@ enum yfs_cm_operation { EM(afs_edit_dir_delete_error, "d_err ") \ EM(afs_edit_dir_delete_inval, "d_invl") \ EM(afs_edit_dir_delete_noent, "d_nent") \ + EM(afs_edit_dir_mkdir, "mk_ent") \ EM(afs_edit_dir_update_dd, "u_ddot") \ EM(afs_edit_dir_update_error, "u_fail") \ EM(afs_edit_dir_update_inval, "u_invl") \ @@ -385,6 +428,7 @@ enum yfs_cm_operation { EM(afs_file_error_dir_over_end, "DIR_ENT_OVER_END") \ EM(afs_file_error_dir_small, "DIR_SMALL") \ EM(afs_file_error_dir_unmarked_ext, "DIR_UNMARKED_EXT") \ + EM(afs_file_error_symlink_big, "SYM_BIG") \ EM(afs_file_error_mntpt, "MNTPT_READ_FAILED") \ E_(afs_file_error_writeback_fail, "WRITEBACK_FAILED") @@ -487,7 +531,9 @@ enum yfs_cm_operation { enum afs_alist_trace { afs_alist_traces } __mode(byte); enum afs_call_trace { afs_call_traces } __mode(byte); enum afs_cb_break_reason { afs_cb_break_reasons } __mode(byte); +enum afs_cb_promise_trace { afs_cb_promise_traces } __mode(byte); enum afs_cell_trace { afs_cell_traces } __mode(byte); +enum afs_dir_invalid_trace { afs_dir_invalid_traces} __mode(byte); enum afs_edit_dir_op { afs_edit_dir_ops } __mode(byte); enum afs_edit_dir_reason { afs_edit_dir_reasons } __mode(byte); enum afs_eproto_cause { afs_eproto_causes } __mode(byte); @@ -498,6 +544,7 @@ enum afs_flock_operation { afs_flock_operations } __mode(byte); enum afs_io_error { afs_io_errors } __mode(byte); enum afs_rotate_trace { afs_rotate_traces } __mode(byte); enum afs_server_trace { afs_server_traces } __mode(byte); +enum afs_vnode_invalid_trace { afs_vnode_invalid_traces} __mode(byte); enum afs_volume_trace { afs_volume_traces } __mode(byte); #endif /* end __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY */ @@ -513,8 +560,10 @@ enum afs_volume_trace { afs_volume_traces } __mode(byte); afs_alist_traces; afs_call_traces; afs_cb_break_reasons; +afs_cb_promise_traces; afs_cell_traces; afs_cm_operations; +afs_dir_invalid_traces; afs_edit_dir_ops; afs_edit_dir_reasons; afs_eproto_causes; @@ -526,6 +575,7 @@ afs_fs_operations; afs_io_errors; afs_rotate_traces; afs_server_traces; +afs_vnode_invalid_traces; afs_vl_operations; yfs_cm_operations; @@ -670,7 +720,7 @@ TRACE_EVENT(afs_make_fs_call, } ), - TP_printk("c=%08x %06llx:%06llx:%06x %s", + TP_printk("c=%08x V=%llx i=%llx:%x %s", __entry->call, __entry->fid.vid, __entry->fid.vnode, @@ -704,7 +754,7 @@ TRACE_EVENT(afs_make_fs_calli, } ), - TP_printk("c=%08x %06llx:%06llx:%06x %s i=%u", + TP_printk("c=%08x V=%llx i=%llx:%x %s i=%u", __entry->call, __entry->fid.vid, __entry->fid.vnode, @@ -741,7 +791,7 @@ TRACE_EVENT(afs_make_fs_call1, __entry->name[__len] = 0; ), - TP_printk("c=%08x %06llx:%06llx:%06x %s \"%s\"", + TP_printk("c=%08x V=%llx i=%llx:%x %s \"%s\"", __entry->call, __entry->fid.vid, __entry->fid.vnode, @@ -782,7 +832,7 @@ TRACE_EVENT(afs_make_fs_call2, __entry->name2[__len2] = 0; ), - TP_printk("c=%08x %06llx:%06llx:%06x %s \"%s\" \"%s\"", + TP_printk("c=%08x V=%llx i=%llx:%x %s \"%s\" \"%s\"", __entry->call, __entry->fid.vid, __entry->fid.vnode, @@ -887,9 +937,9 @@ TRACE_EVENT(afs_sent_data, ); TRACE_EVENT(afs_dir_check_failed, - TP_PROTO(struct afs_vnode *vnode, loff_t off, loff_t i_size), + TP_PROTO(struct afs_vnode *vnode, loff_t off), - TP_ARGS(vnode, off, i_size), + TP_ARGS(vnode, off), TP_STRUCT__entry( __field(struct afs_vnode *, vnode) @@ -900,7 +950,7 @@ TRACE_EVENT(afs_dir_check_failed, TP_fast_assign( __entry->vnode = vnode; __entry->off = off; - __entry->i_size = i_size; + __entry->i_size = 
i_size_read(&vnode->netfs.inode); ), TP_printk("vn=%p %llx/%llx", @@ -1002,7 +1052,7 @@ TRACE_EVENT(afs_edit_dir, __entry->name[__len] = 0; ), - TP_printk("d=%x:%x %s %s %u[%u] f=%x:%x \"%s\"", + TP_printk("di=%x:%x %s %s %u[%u] fi=%x:%x \"%s\"", __entry->vnode, __entry->unique, __print_symbolic(__entry->why, afs_edit_dir_reasons), __print_symbolic(__entry->op, afs_edit_dir_ops), @@ -1011,6 +1061,122 @@ TRACE_EVENT(afs_edit_dir, __entry->name) ); +TRACE_EVENT(afs_dir_invalid, + TP_PROTO(const struct afs_vnode *dvnode, enum afs_dir_invalid_trace trace), + + TP_ARGS(dvnode, trace), + + TP_STRUCT__entry( + __field(unsigned int, vnode) + __field(unsigned int, unique) + __field(enum afs_dir_invalid_trace, trace) + ), + + TP_fast_assign( + __entry->vnode = dvnode->fid.vnode; + __entry->unique = dvnode->fid.unique; + __entry->trace = trace; + ), + + TP_printk("di=%x:%x %s", + __entry->vnode, __entry->unique, + __print_symbolic(__entry->trace, afs_dir_invalid_traces)) + ); + +TRACE_EVENT(afs_cb_promise, + TP_PROTO(const struct afs_vnode *vnode, enum afs_cb_promise_trace trace), + + TP_ARGS(vnode, trace), + + TP_STRUCT__entry( + __field(unsigned int, vnode) + __field(unsigned int, unique) + __field(enum afs_cb_promise_trace, trace) + ), + + TP_fast_assign( + __entry->vnode = vnode->fid.vnode; + __entry->unique = vnode->fid.unique; + __entry->trace = trace; + ), + + TP_printk("di=%x:%x %s", + __entry->vnode, __entry->unique, + __print_symbolic(__entry->trace, afs_cb_promise_traces)) + ); + +TRACE_EVENT(afs_vnode_invalid, + TP_PROTO(const struct afs_vnode *vnode, enum afs_vnode_invalid_trace trace), + + TP_ARGS(vnode, trace), + + TP_STRUCT__entry( + __field(unsigned int, vnode) + __field(unsigned int, unique) + __field(enum afs_vnode_invalid_trace, trace) + ), + + TP_fast_assign( + __entry->vnode = vnode->fid.vnode; + __entry->unique = vnode->fid.unique; + __entry->trace = trace; + ), + + TP_printk("di=%x:%x %s", + __entry->vnode, __entry->unique, + __print_symbolic(__entry->trace, afs_vnode_invalid_traces)) + ); + +TRACE_EVENT(afs_set_dv, + TP_PROTO(const struct afs_vnode *dvnode, u64 new_dv), + + TP_ARGS(dvnode, new_dv), + + TP_STRUCT__entry( + __field(unsigned int, vnode) + __field(unsigned int, unique) + __field(u64, old_dv) + __field(u64, new_dv) + ), + + TP_fast_assign( + __entry->vnode = dvnode->fid.vnode; + __entry->unique = dvnode->fid.unique; + __entry->old_dv = dvnode->status.data_version; + __entry->new_dv = new_dv; + ), + + TP_printk("di=%x:%x dv=%llx -> dv=%llx", + __entry->vnode, __entry->unique, + __entry->old_dv, __entry->new_dv) + ); + +TRACE_EVENT(afs_dv_mismatch, + TP_PROTO(const struct afs_vnode *dvnode, u64 before_dv, int delta, u64 new_dv), + + TP_ARGS(dvnode, before_dv, delta, new_dv), + + TP_STRUCT__entry( + __field(unsigned int, vnode) + __field(unsigned int, unique) + __field(int, delta) + __field(u64, before_dv) + __field(u64, new_dv) + ), + + TP_fast_assign( + __entry->vnode = dvnode->fid.vnode; + __entry->unique = dvnode->fid.unique; + __entry->delta = delta; + __entry->before_dv = before_dv; + __entry->new_dv = new_dv; + ), + + TP_printk("di=%x:%x xdv=%llx+%d dv=%llx", + __entry->vnode, __entry->unique, + __entry->before_dv, __entry->delta, __entry->new_dv) + ); + TRACE_EVENT(afs_protocol_error, TP_PROTO(struct afs_call *call, enum afs_eproto_cause cause), @@ -1611,6 +1777,36 @@ TRACE_EVENT(afs_make_call, __entry->fid.unique) ); +TRACE_EVENT(afs_read_recv, + TP_PROTO(const struct afs_operation *op, const struct afs_call *call), + + TP_ARGS(op, call), + + 
TP_STRUCT__entry( + __field(unsigned int, rreq) + __field(unsigned int, sreq) + __field(unsigned int, op) + __field(unsigned int, op_flags) + __field(unsigned int, call) + __field(enum afs_call_state, call_state) + ), + + TP_fast_assign( + __entry->op = op->debug_id; + __entry->sreq = op->fetch.subreq->debug_index; + __entry->rreq = op->fetch.subreq->rreq->debug_id; + __entry->op_flags = op->flags; + __entry->call = call->debug_id; + __entry->call_state = call->state; + ), + + TP_printk("R=%08x[%x] OP=%08x c=%08x cs=%x of=%x", + __entry->rreq, __entry->sreq, + __entry->op, + __entry->call, __entry->call_state, + __entry->op_flags) + ); + #endif /* _TRACE_AFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/amdxdna.h b/include/trace/events/amdxdna.h new file mode 100644 index 000000000000..c6cb2da7b706 --- /dev/null +++ b/include/trace/events/amdxdna.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023-2024, Advanced Micro Devices, Inc. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM amdxdna + +#if !defined(_TRACE_AMDXDNA_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_AMDXDNA_H + +#include <drm/gpu_scheduler.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(amdxdna_debug_point, + TP_PROTO(const char *name, u64 number, const char *str), + + TP_ARGS(name, number, str), + + TP_STRUCT__entry(__string(name, name) + __field(u64, number) + __string(str, str)), + + TP_fast_assign(__assign_str(name); + __entry->number = number; + __assign_str(str);), + + TP_printk("%s:%llu %s", __get_str(name), __entry->number, + __get_str(str)) +); + +TRACE_EVENT(xdna_job, + TP_PROTO(struct drm_sched_job *sched_job, const char *name, const char *str, u64 seq), + + TP_ARGS(sched_job, name, str, seq), + + TP_STRUCT__entry(__string(name, name) + __string(str, str) + __field(u64, fence_context) + __field(u64, fence_seqno) + __field(u64, seq)), + + TP_fast_assign(__assign_str(name); + __assign_str(str); + __entry->fence_context = sched_job->s_fence->finished.context; + __entry->fence_seqno = sched_job->s_fence->finished.seqno; + __entry->seq = seq;), + + TP_printk("fence=(context:%llu, seqno:%lld), %s seq#:%lld %s", + __entry->fence_context, __entry->fence_seqno, + __get_str(name), __entry->seq, + __get_str(str)) +); + +DECLARE_EVENT_CLASS(xdna_mbox_msg, + TP_PROTO(char *name, u8 chann_id, u32 opcode, u32 msg_id), + + TP_ARGS(name, chann_id, opcode, msg_id), + + TP_STRUCT__entry(__string(name, name) + __field(u32, chann_id) + __field(u32, opcode) + __field(u32, msg_id)), + + TP_fast_assign(__assign_str(name); + __entry->chann_id = chann_id; + __entry->opcode = opcode; + __entry->msg_id = msg_id;), + + TP_printk("%s.%d id 0x%x opcode 0x%x", __get_str(name), + __entry->chann_id, __entry->msg_id, __entry->opcode) +); + +DEFINE_EVENT(xdna_mbox_msg, mbox_set_tail, + TP_PROTO(char *name, u8 chann_id, u32 opcode, u32 id), + TP_ARGS(name, chann_id, opcode, id) +); + +DEFINE_EVENT(xdna_mbox_msg, mbox_set_head, + TP_PROTO(char *name, u8 chann_id, u32 opcode, u32 id), + TP_ARGS(name, chann_id, opcode, id) +); + +TRACE_EVENT(mbox_irq_handle, + TP_PROTO(char *name, int irq), + + TP_ARGS(name, irq), + + TP_STRUCT__entry(__string(name, name) + __field(int, irq)), + + TP_fast_assign(__assign_str(name); + __entry->irq = irq;), + + TP_printk("%s.%d", __get_str(name), __entry->irq) +); + +#endif /* !defined(_TRACE_AMDXDNA_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git 
a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 4df93ca9b7a8..549ab3b41961 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -100,7 +100,8 @@ struct find_free_extent_ctl; EM( ALLOC_CHUNK, "ALLOC_CHUNK") \ EM( ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE") \ EM( RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS") \ - EMe(COMMIT_TRANS, "COMMIT_TRANS") + EM( COMMIT_TRANS, "COMMIT_TRANS") \ + EMe(RESET_ZONES, "RESET_ZONES") /* * First define the enums in the above macros to be exported to userspace via diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h index 7d931db02b93..a743b2a35ea7 100644 --- a/include/trace/events/cachefiles.h +++ b/include/trace/events/cachefiles.h @@ -223,10 +223,10 @@ TRACE_EVENT(cachefiles_ref, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, cookie ) - __field(enum cachefiles_obj_ref_trace, why ) - __field(int, usage ) + __field(unsigned int, obj) + __field(unsigned int, cookie) + __field(enum cachefiles_obj_ref_trace, why) + __field(int, usage) ), TP_fast_assign( @@ -249,10 +249,10 @@ TRACE_EVENT(cachefiles_lookup, TP_ARGS(obj, dir, de), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(short, error ) - __field(unsigned long, dino ) - __field(unsigned long, ino ) + __field(unsigned int, obj) + __field(short, error) + __field(unsigned long, dino) + __field(unsigned long, ino) ), TP_fast_assign( @@ -273,8 +273,8 @@ TRACE_EVENT(cachefiles_mkdir, TP_ARGS(dir, subdir), TP_STRUCT__entry( - __field(unsigned int, dir ) - __field(unsigned int, subdir ) + __field(unsigned int, dir) + __field(unsigned int, subdir) ), TP_fast_assign( @@ -293,8 +293,8 @@ TRACE_EVENT(cachefiles_tmpfile, TP_ARGS(obj, backer), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) + __field(unsigned int, obj) + __field(unsigned int, backer) ), TP_fast_assign( @@ -313,8 +313,8 @@ TRACE_EVENT(cachefiles_link, TP_ARGS(obj, backer), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) + __field(unsigned int, obj) + __field(unsigned int, backer) ), TP_fast_assign( @@ -336,9 +336,9 @@ TRACE_EVENT(cachefiles_unlink, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, ino ) - __field(enum fscache_why_object_killed, why ) + __field(unsigned int, obj) + __field(unsigned int, ino) + __field(enum fscache_why_object_killed, why) ), TP_fast_assign( @@ -361,9 +361,9 @@ TRACE_EVENT(cachefiles_rename, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, ino ) - __field(enum fscache_why_object_killed, why ) + __field(unsigned int, obj) + __field(unsigned int, ino) + __field(enum fscache_why_object_killed, why) ), TP_fast_assign( @@ -380,17 +380,20 @@ TRACE_EVENT(cachefiles_rename, TRACE_EVENT(cachefiles_coherency, TP_PROTO(struct cachefiles_object *obj, ino_t ino, + u64 disk_aux, enum cachefiles_content content, enum cachefiles_coherency_trace why), - TP_ARGS(obj, ino, content, why), + TP_ARGS(obj, ino, disk_aux, content, why), /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(enum cachefiles_coherency_trace, why ) - __field(enum cachefiles_content, content ) - __field(u64, ino ) + __field(unsigned int, obj) + __field(enum cachefiles_coherency_trace, why) + __field(enum cachefiles_content, content) + __field(u64, ino) + __field(u64, aux) + __field(u64, disk_aux) ), TP_fast_assign( @@ -398,13 +401,17 
@@ TRACE_EVENT(cachefiles_coherency, __entry->why = why; __entry->content = content; __entry->ino = ino; + __entry->aux = be64_to_cpup((__be64 *)obj->cookie->inline_aux); + __entry->disk_aux = disk_aux; ), - TP_printk("o=%08x %s B=%llx c=%u", + TP_printk("o=%08x %s B=%llx c=%u aux=%llx dsk=%llx", __entry->obj, __print_symbolic(__entry->why, cachefiles_coherency_traces), __entry->ino, - __entry->content) + __entry->content, + __entry->aux, + __entry->disk_aux) ); TRACE_EVENT(cachefiles_vol_coherency, @@ -416,9 +423,9 @@ TRACE_EVENT(cachefiles_vol_coherency, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, vol ) - __field(enum cachefiles_coherency_trace, why ) - __field(u64, ino ) + __field(unsigned int, vol) + __field(enum cachefiles_coherency_trace, why) + __field(u64, ino) ), TP_fast_assign( @@ -445,14 +452,14 @@ TRACE_EVENT(cachefiles_prep_read, TP_ARGS(obj, start, len, flags, source, why, cache_inode, netfs_inode), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned short, flags ) - __field(enum netfs_io_source, source ) - __field(enum cachefiles_prepare_read_trace, why ) - __field(size_t, len ) - __field(loff_t, start ) - __field(unsigned int, netfs_inode ) - __field(unsigned int, cache_inode ) + __field(unsigned int, obj) + __field(unsigned short, flags) + __field(enum netfs_io_source, source) + __field(enum cachefiles_prepare_read_trace, why) + __field(size_t, len) + __field(loff_t, start) + __field(unsigned int, netfs_inode) + __field(unsigned int, cache_inode) ), TP_fast_assign( @@ -484,10 +491,10 @@ TRACE_EVENT(cachefiles_read, TP_ARGS(obj, backer, start, len), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) - __field(size_t, len ) - __field(loff_t, start ) + __field(unsigned int, obj) + __field(unsigned int, backer) + __field(size_t, len) + __field(loff_t, start) ), TP_fast_assign( @@ -513,10 +520,10 @@ TRACE_EVENT(cachefiles_write, TP_ARGS(obj, backer, start, len), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) - __field(size_t, len ) - __field(loff_t, start ) + __field(unsigned int, obj) + __field(unsigned int, backer) + __field(size_t, len) + __field(loff_t, start) ), TP_fast_assign( @@ -540,11 +547,11 @@ TRACE_EVENT(cachefiles_trunc, TP_ARGS(obj, backer, from, to, why), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) - __field(enum cachefiles_trunc_trace, why ) - __field(loff_t, from ) - __field(loff_t, to ) + __field(unsigned int, obj) + __field(unsigned int, backer) + __field(enum cachefiles_trunc_trace, why) + __field(loff_t, from) + __field(loff_t, to) ), TP_fast_assign( @@ -571,8 +578,8 @@ TRACE_EVENT(cachefiles_mark_active, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(ino_t, inode ) + __field(unsigned int, obj) + __field(ino_t, inode) ), TP_fast_assign( @@ -592,8 +599,8 @@ TRACE_EVENT(cachefiles_mark_failed, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(ino_t, inode ) + __field(unsigned int, obj) + __field(ino_t, inode) ), TP_fast_assign( @@ -613,8 +620,8 @@ TRACE_EVENT(cachefiles_mark_inactive, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(ino_t, inode ) + __field(unsigned int, obj) + __field(ino_t, inode) ), TP_fast_assign( @@ -633,10 +640,10 @@ TRACE_EVENT(cachefiles_vfs_error, TP_ARGS(obj, backer, error, where), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) - 
__field(enum cachefiles_error_trace, where ) - __field(short, error ) + __field(unsigned int, obj) + __field(unsigned int, backer) + __field(enum cachefiles_error_trace, where) + __field(short, error) ), TP_fast_assign( @@ -660,10 +667,10 @@ TRACE_EVENT(cachefiles_io_error, TP_ARGS(obj, backer, error, where), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) - __field(enum cachefiles_error_trace, where ) - __field(short, error ) + __field(unsigned int, obj) + __field(unsigned int, backer) + __field(enum cachefiles_error_trace, where) + __field(short, error) ), TP_fast_assign( @@ -687,11 +694,11 @@ TRACE_EVENT(cachefiles_ondemand_open, TP_ARGS(obj, msg, load), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, msg_id ) - __field(unsigned int, object_id ) - __field(unsigned int, fd ) - __field(unsigned int, flags ) + __field(unsigned int, obj) + __field(unsigned int, msg_id) + __field(unsigned int, object_id) + __field(unsigned int, fd) + __field(unsigned int, flags) ), TP_fast_assign( @@ -717,9 +724,9 @@ TRACE_EVENT(cachefiles_ondemand_copen, TP_ARGS(obj, msg_id, len), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, msg_id ) - __field(long, len ) + __field(unsigned int, obj) + __field(unsigned int, msg_id) + __field(long, len) ), TP_fast_assign( @@ -740,9 +747,9 @@ TRACE_EVENT(cachefiles_ondemand_close, TP_ARGS(obj, msg), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, msg_id ) - __field(unsigned int, object_id ) + __field(unsigned int, obj) + __field(unsigned int, msg_id) + __field(unsigned int, object_id) ), TP_fast_assign( @@ -764,11 +771,11 @@ TRACE_EVENT(cachefiles_ondemand_read, TP_ARGS(obj, msg, load), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, msg_id ) - __field(unsigned int, object_id ) - __field(loff_t, start ) - __field(size_t, len ) + __field(unsigned int, obj) + __field(unsigned int, msg_id) + __field(unsigned int, object_id) + __field(loff_t, start) + __field(size_t, len) ), TP_fast_assign( @@ -793,8 +800,8 @@ TRACE_EVENT(cachefiles_ondemand_cread, TP_ARGS(obj, msg_id), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, msg_id ) + __field(unsigned int, obj) + __field(unsigned int, msg_id) ), TP_fast_assign( @@ -814,10 +821,10 @@ TRACE_EVENT(cachefiles_ondemand_fd_write, TP_ARGS(obj, backer, start, len), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) - __field(loff_t, start ) - __field(size_t, len ) + __field(unsigned int, obj) + __field(unsigned int, backer) + __field(loff_t, start) + __field(size_t, len) ), TP_fast_assign( @@ -840,8 +847,8 @@ TRACE_EVENT(cachefiles_ondemand_fd_release, TP_ARGS(obj, object_id), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, object_id ) + __field(unsigned int, obj) + __field(unsigned int, object_id) ), TP_fast_assign( diff --git a/include/trace/events/capability.h b/include/trace/events/capability.h new file mode 100644 index 000000000000..17340257946c --- /dev/null +++ b/include/trace/events/capability.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM capability + +#if !defined(_TRACE_CAPABILITY_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_CAPABILITY_H + +#include <linux/cred.h> +#include <linux/tracepoint.h> +#include <linux/user_namespace.h> + +/** + * cap_capable - called after it's determined if a task has a particular + * effective capability + * + * @cred: The credentials 
used + * @target_ns: The user namespace of the resource being accessed + * @capable_ns: The user namespace in which the credential provides the + * capability to access the targeted resource. + * This will be NULL if ret is not 0. + * @cap: The capability to check for + * @ret: The return value of the check: 0 if it does, -ve if it does not + * + * Allows to trace calls to cap_capable in commoncap.c + */ +TRACE_EVENT(cap_capable, + + TP_PROTO(const struct cred *cred, struct user_namespace *target_ns, + const struct user_namespace *capable_ns, int cap, int ret), + + TP_ARGS(cred, target_ns, capable_ns, cap, ret), + + TP_STRUCT__entry( + __field(const struct cred *, cred) + __field(struct user_namespace *, target_ns) + __field(const struct user_namespace *, capable_ns) + __field(int, cap) + __field(int, ret) + ), + + TP_fast_assign( + __entry->cred = cred; + __entry->target_ns = target_ns; + __entry->capable_ns = ret == 0 ? capable_ns : NULL; + __entry->cap = cap; + __entry->ret = ret; + ), + + TP_printk("cred %p, target_ns %p, capable_ns %p, cap %d, ret %d", + __entry->cred, __entry->target_ns, __entry->capable_ns, __entry->cap, + __entry->ret) +); + +#endif /* _TRACE_CAPABILITY_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 2851c823095b..eb3b2f1326b1 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -1119,11 +1119,11 @@ TRACE_EVENT(f2fs_reserve_new_blocks, (unsigned long long)__entry->count) ); -DECLARE_EVENT_CLASS(f2fs__submit_page_bio, +DECLARE_EVENT_CLASS(f2fs__submit_folio_bio, - TP_PROTO(struct page *page, struct f2fs_io_info *fio), + TP_PROTO(struct folio *folio, struct f2fs_io_info *fio), - TP_ARGS(page, fio), + TP_ARGS(folio, fio), TP_STRUCT__entry( __field(dev_t, dev) @@ -1138,9 +1138,9 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, ), TP_fast_assign( - __entry->dev = page_file_mapping(page)->host->i_sb->s_dev; - __entry->ino = page_file_mapping(page)->host->i_ino; - __entry->index = page->index; + __entry->dev = folio->mapping->host->i_sb->s_dev; + __entry->ino = folio->mapping->host->i_ino; + __entry->index = folio->index; __entry->old_blkaddr = fio->old_blkaddr; __entry->new_blkaddr = fio->new_blkaddr; __entry->op = fio->op; @@ -1149,7 +1149,7 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, __entry->type = fio->type; ), - TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, " + TP_printk("dev = (%d,%d), ino = %lu, folio_index = 0x%lx, " "oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s(%s), type = %s_%s", show_dev_ino(__entry), (unsigned long)__entry->index, @@ -1160,22 +1160,22 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, show_block_type(__entry->type)) ); -DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_bio, +DEFINE_EVENT_CONDITION(f2fs__submit_folio_bio, f2fs_submit_folio_bio, - TP_PROTO(struct page *page, struct f2fs_io_info *fio), + TP_PROTO(struct folio *folio, struct f2fs_io_info *fio), - TP_ARGS(page, fio), + TP_ARGS(folio, fio), - TP_CONDITION(page->mapping) + TP_CONDITION(folio->mapping) ); -DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_write, +DEFINE_EVENT_CONDITION(f2fs__submit_folio_bio, f2fs_submit_folio_write, - TP_PROTO(struct page *page, struct f2fs_io_info *fio), + TP_PROTO(struct folio *folio, struct f2fs_io_info *fio), - TP_ARGS(page, fio), + TP_ARGS(folio, fio), - TP_CONDITION(page->mapping) + TP_CONDITION(folio->mapping) ); DECLARE_EVENT_CLASS(f2fs__bio, @@ -1322,12 +1322,11 @@ 
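
Stepping back to the new cap_capable tracepoint above: a kernel module can hook it with the register_trace_cap_capable() function that TRACE_EVENT() generates for this name. This is only a hedged sketch — it assumes the tracepoint is exported to modules (in-tree callers can always attach), and note that a probe receives the registration cookie first, followed by the TP_PROTO() arguments.

#include <linux/module.h>
#include <trace/events/capability.h>

/* Probe signature: void *cookie first, then the TP_PROTO() arguments. */
static void cap_probe(void *cookie, const struct cred *cred,
		      struct user_namespace *target_ns,
		      const struct user_namespace *capable_ns,
		      int cap, int ret)
{
	if (ret)	/* per the kdoc above, capable_ns is NULL when ret != 0 */
		pr_info("cap %d denied (err %d)\n", cap, ret);
}

static int __init cap_trace_init(void)
{
	return register_trace_cap_capable(cap_probe, NULL);
}

static void __exit cap_trace_exit(void)
{
	unregister_trace_cap_capable(cap_probe, NULL);
	tracepoint_synchronize_unregister();
}

module_init(cap_trace_init);
module_exit(cap_trace_exit);
MODULE_LICENSE("GPL");
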
DECLARE_EVENT_CLASS(f2fs__folio, ), TP_fast_assign( - __entry->dev = folio_file_mapping(folio)->host->i_sb->s_dev; - __entry->ino = folio_file_mapping(folio)->host->i_ino; + __entry->dev = folio->mapping->host->i_sb->s_dev; + __entry->ino = folio->mapping->host->i_ino; __entry->type = type; - __entry->dir = - S_ISDIR(folio_file_mapping(folio)->host->i_mode); - __entry->index = folio_index(folio); + __entry->dir = S_ISDIR(folio->mapping->host->i_mode); + __entry->index = folio->index; __entry->dirty = folio_test_dirty(folio); __entry->uptodate = folio_test_uptodate(folio); ), diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h index 5d7ee2610728..8d22b2e98d48 100644 --- a/include/trace/events/fib6.h +++ b/include/trace/events/fib6.h @@ -22,6 +22,7 @@ TRACE_EVENT(fib6_table_lookup, __field( int, err ) __field( int, oif ) __field( int, iif ) + __field( u32, flowlabel ) __field( __u8, tos ) __field( __u8, scope ) __field( __u8, flags ) @@ -42,6 +43,7 @@ TRACE_EVENT(fib6_table_lookup, __entry->err = ip6_rt_type_to_error(res->fib6_type); __entry->oif = flp->flowi6_oif; __entry->iif = flp->flowi6_iif; + __entry->flowlabel = ntohl(flowi6_get_flowlabel(flp)); __entry->tos = ip6_tclass(flp->flowlabel); __entry->scope = flp->flowi6_scope; __entry->flags = flp->flowi6_flags; @@ -76,11 +78,11 @@ TRACE_EVENT(fib6_table_lookup, } ), - TP_printk("table %3u oif %d iif %d proto %u %pI6c/%u -> %pI6c/%u tos %d scope %d flags %x ==> dev %s gw %pI6c err %d", + TP_printk("table %3u oif %d iif %d proto %u %pI6c/%u -> %pI6c/%u flowlabel %#x tos %d scope %d flags %x ==> dev %s gw %pI6c err %d", __entry->tb_id, __entry->oif, __entry->iif, __entry->proto, __entry->src, __entry->sport, __entry->dst, __entry->dport, - __entry->tos, __entry->scope, __entry->flags, - __entry->name, __entry->gw, __entry->err) + __entry->flowlabel, __entry->tos, __entry->scope, + __entry->flags, __entry->name, __entry->gw, __entry->err) ); #endif /* _TRACE_FIB6_H */ diff --git a/include/trace/events/mmap_lock.h b/include/trace/events/mmap_lock.h index bc2e3ad787b3..cf9f9faf8914 100644 --- a/include/trace/events/mmap_lock.h +++ b/include/trace/events/mmap_lock.h @@ -5,6 +5,7 @@ #if !defined(_TRACE_MMAP_LOCK_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_MMAP_LOCK_H +#include <linux/memcontrol.h> #include <linux/tracepoint.h> #include <linux/types.h> @@ -12,64 +13,61 @@ struct mm_struct; DECLARE_EVENT_CLASS(mmap_lock, - TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write), + TP_PROTO(struct mm_struct *mm, bool write), - TP_ARGS(mm, memcg_path, write), + TP_ARGS(mm, write), TP_STRUCT__entry( __field(struct mm_struct *, mm) - __string(memcg_path, memcg_path) + __field(u64, memcg_id) __field(bool, write) ), TP_fast_assign( __entry->mm = mm; - __assign_str(memcg_path); + __entry->memcg_id = cgroup_id_from_mm(mm); __entry->write = write; ), TP_printk( - "mm=%p memcg_path=%s write=%s", - __entry->mm, - __get_str(memcg_path), + "mm=%p memcg_id=%llu write=%s", + __entry->mm, __entry->memcg_id, __entry->write ? 
"true" : "false" ) ); #define DEFINE_MMAP_LOCK_EVENT(name) \ DEFINE_EVENT(mmap_lock, name, \ - TP_PROTO(struct mm_struct *mm, const char *memcg_path, \ - bool write), \ - TP_ARGS(mm, memcg_path, write)) + TP_PROTO(struct mm_struct *mm, bool write), \ + TP_ARGS(mm, write)) DEFINE_MMAP_LOCK_EVENT(mmap_lock_start_locking); DEFINE_MMAP_LOCK_EVENT(mmap_lock_released); TRACE_EVENT(mmap_lock_acquire_returned, - TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write, - bool success), + TP_PROTO(struct mm_struct *mm, bool write, bool success), - TP_ARGS(mm, memcg_path, write, success), + TP_ARGS(mm, write, success), TP_STRUCT__entry( __field(struct mm_struct *, mm) - __string(memcg_path, memcg_path) + __field(u64, memcg_id) __field(bool, write) __field(bool, success) ), TP_fast_assign( __entry->mm = mm; - __assign_str(memcg_path); + __entry->memcg_id = cgroup_id_from_mm(mm); __entry->write = write; __entry->success = success; ), TP_printk( - "mm=%p memcg_path=%s write=%s success=%s", + "mm=%p memcg_id=%llu write=%s success=%s", __entry->mm, - __get_str(memcg_path), + __entry->memcg_id, __entry->write ? "true" : "false", __entry->success ? "true" : "false" ) diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index bb8a59c6caa2..72fbfe3caeaf 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -13,6 +13,69 @@ * Thus most bits set go first. */ +/* These define the values that are enums (the bits) */ +#define TRACE_GFP_FLAGS_GENERAL \ + TRACE_GFP_EM(DMA) \ + TRACE_GFP_EM(HIGHMEM) \ + TRACE_GFP_EM(DMA32) \ + TRACE_GFP_EM(MOVABLE) \ + TRACE_GFP_EM(RECLAIMABLE) \ + TRACE_GFP_EM(HIGH) \ + TRACE_GFP_EM(IO) \ + TRACE_GFP_EM(FS) \ + TRACE_GFP_EM(ZERO) \ + TRACE_GFP_EM(DIRECT_RECLAIM) \ + TRACE_GFP_EM(KSWAPD_RECLAIM) \ + TRACE_GFP_EM(WRITE) \ + TRACE_GFP_EM(NOWARN) \ + TRACE_GFP_EM(RETRY_MAYFAIL) \ + TRACE_GFP_EM(NOFAIL) \ + TRACE_GFP_EM(NORETRY) \ + TRACE_GFP_EM(MEMALLOC) \ + TRACE_GFP_EM(COMP) \ + TRACE_GFP_EM(NOMEMALLOC) \ + TRACE_GFP_EM(HARDWALL) \ + TRACE_GFP_EM(THISNODE) \ + TRACE_GFP_EM(ACCOUNT) \ + TRACE_GFP_EM(ZEROTAGS) + +#ifdef CONFIG_KASAN_HW_TAGS +# define TRACE_GFP_FLAGS_KASAN \ + TRACE_GFP_EM(SKIP_ZERO) \ + TRACE_GFP_EM(SKIP_KASAN) +#else +# define TRACE_GFP_FLAGS_KASAN +#endif + +#ifdef CONFIG_LOCKDEP +# define TRACE_GFP_FLAGS_LOCKDEP \ + TRACE_GFP_EM(NOLOCKDEP) +#else +# define TRACE_GFP_FLAGS_LOCKDEP +#endif + +#ifdef CONFIG_SLAB_OBJ_EXT +# define TRACE_GFP_FLAGS_SLAB \ + TRACE_GFP_EM(NO_OBJ_EXT) +#else +# define TRACE_GFP_FLAGS_SLAB +#endif + +#define TRACE_GFP_FLAGS \ + TRACE_GFP_FLAGS_GENERAL \ + TRACE_GFP_FLAGS_KASAN \ + TRACE_GFP_FLAGS_LOCKDEP \ + TRACE_GFP_FLAGS_SLAB + +#undef TRACE_GFP_EM +#define TRACE_GFP_EM(a) TRACE_DEFINE_ENUM(___GFP_##a##_BIT); + +TRACE_GFP_FLAGS + +/* Just in case these are ever used */ +TRACE_DEFINE_ENUM(___GFP_UNUSED_BIT); +TRACE_DEFINE_ENUM(___GFP_LAST_BIT); + #define gfpflag_string(flag) {(__force unsigned long)flag, #flag} #define __def_gfpflag_names \ @@ -116,7 +179,8 @@ DEF_PAGEFLAG_NAME(head), \ DEF_PAGEFLAG_NAME(reclaim), \ DEF_PAGEFLAG_NAME(swapbacked), \ - DEF_PAGEFLAG_NAME(unevictable) \ + DEF_PAGEFLAG_NAME(unevictable), \ + DEF_PAGEFLAG_NAME(dropbehind) \ IF_HAVE_PG_MLOCK(mlocked) \ IF_HAVE_PG_HWPOISON(hwpoison) \ IF_HAVE_PG_IDLE(idle) \ diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index bf511bca896e..6e699cadcb29 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -21,6 +21,7 @@ EM(netfs_read_trace_readahead, "READAHEAD") \ 
EM(netfs_read_trace_readpage, "READPAGE ") \ EM(netfs_read_trace_read_gaps, "READ-GAPS") \ + EM(netfs_read_trace_read_single, "READ-SNGL") \ EM(netfs_read_trace_prefetch_for_write, "PREFETCHW") \ E_(netfs_read_trace_write_begin, "WRITEBEGN") @@ -35,9 +36,11 @@ EM(NETFS_READAHEAD, "RA") \ EM(NETFS_READPAGE, "RP") \ EM(NETFS_READ_GAPS, "RG") \ + EM(NETFS_READ_SINGLE, "R1") \ EM(NETFS_READ_FOR_WRITE, "RW") \ EM(NETFS_DIO_READ, "DR") \ EM(NETFS_WRITEBACK, "WB") \ + EM(NETFS_WRITEBACK_SINGLE, "W1") \ EM(NETFS_WRITETHROUGH, "WT") \ EM(NETFS_UNBUFFERED_WRITE, "UW") \ EM(NETFS_DIO_WRITE, "DW") \ @@ -47,17 +50,23 @@ EM(netfs_rreq_trace_assess, "ASSESS ") \ EM(netfs_rreq_trace_copy, "COPY ") \ EM(netfs_rreq_trace_collect, "COLLECT") \ + EM(netfs_rreq_trace_complete, "COMPLET") \ + EM(netfs_rreq_trace_dirty, "DIRTY ") \ EM(netfs_rreq_trace_done, "DONE ") \ EM(netfs_rreq_trace_free, "FREE ") \ EM(netfs_rreq_trace_redirty, "REDIRTY") \ EM(netfs_rreq_trace_resubmit, "RESUBMT") \ + EM(netfs_rreq_trace_set_abandon, "S-ABNDN") \ EM(netfs_rreq_trace_set_pause, "PAUSE ") \ EM(netfs_rreq_trace_unlock, "UNLOCK ") \ EM(netfs_rreq_trace_unlock_pgpriv2, "UNLCK-2") \ EM(netfs_rreq_trace_unmark, "UNMARK ") \ EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \ EM(netfs_rreq_trace_wait_pause, "WT-PAUS") \ + EM(netfs_rreq_trace_wait_queue, "WAIT-Q ") \ EM(netfs_rreq_trace_wake_ip, "WAKE-IP") \ + EM(netfs_rreq_trace_wake_queue, "WAKE-Q ") \ + EM(netfs_rreq_trace_woke_queue, "WOKE-Q ") \ EM(netfs_rreq_trace_unpause, "UNPAUSE") \ E_(netfs_rreq_trace_write_done, "WR-DONE") @@ -74,6 +83,10 @@ #define netfs_sreq_traces \ EM(netfs_sreq_trace_add_donations, "+DON ") \ EM(netfs_sreq_trace_added, "ADD ") \ + EM(netfs_sreq_trace_cache_nowrite, "CA-NW") \ + EM(netfs_sreq_trace_cache_prepare, "CA-PR") \ + EM(netfs_sreq_trace_cache_write, "CA-WR") \ + EM(netfs_sreq_trace_cancel, "CANCL") \ EM(netfs_sreq_trace_clear, "CLEAR") \ EM(netfs_sreq_trace_discard, "DSCRD") \ EM(netfs_sreq_trace_donate_to_prev, "DON-P") \ @@ -84,6 +97,9 @@ EM(netfs_sreq_trace_hit_eof, "EOF ") \ EM(netfs_sreq_trace_io_progress, "IO ") \ EM(netfs_sreq_trace_limited, "LIMIT") \ + EM(netfs_sreq_trace_need_clear, "N-CLR") \ + EM(netfs_sreq_trace_partial_read, "PARTR") \ + EM(netfs_sreq_trace_need_retry, "NRTRY") \ EM(netfs_sreq_trace_prepare, "PREP ") \ EM(netfs_sreq_trace_prep_failed, "PRPFL") \ EM(netfs_sreq_trace_progress, "PRGRS") \ @@ -129,6 +145,7 @@ EM(netfs_sreq_trace_get_submit, "GET SUBMIT") \ EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \ EM(netfs_sreq_trace_new, "NEW ") \ + EM(netfs_sreq_trace_put_abandon, "PUT ABANDON") \ EM(netfs_sreq_trace_put_cancel, "PUT CANCEL ") \ EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \ EM(netfs_sreq_trace_put_consumed, "PUT CONSUME") \ @@ -152,6 +169,7 @@ EM(netfs_streaming_filled_page, "mod-streamw-f") \ EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \ EM(netfs_folio_trace_abandon, "abandon") \ + EM(netfs_folio_trace_alloc_buffer, "alloc-buf") \ EM(netfs_folio_trace_cancel_copy, "cancel-copy") \ EM(netfs_folio_trace_cancel_store, "cancel-store") \ EM(netfs_folio_trace_clear, "clear") \ @@ -168,6 +186,7 @@ EM(netfs_folio_trace_mkwrite, "mkwrite") \ EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \ EM(netfs_folio_trace_not_under_wback, "!wback") \ + EM(netfs_folio_trace_not_locked, "!locked") \ EM(netfs_folio_trace_put, "put") \ EM(netfs_folio_trace_read, "read") \ EM(netfs_folio_trace_read_done, "read-done") \ @@ -191,6 +210,14 @@ EM(netfs_trace_donate_to_next, "to-next") \ E_(netfs_trace_donate_to_deferred_next, 
"defer-next") +#define netfs_folioq_traces \ + EM(netfs_trace_folioq_alloc_buffer, "alloc-buf") \ + EM(netfs_trace_folioq_clear, "clear") \ + EM(netfs_trace_folioq_delete, "delete") \ + EM(netfs_trace_folioq_make_space, "make-space") \ + EM(netfs_trace_folioq_rollbuf_init, "roll-init") \ + E_(netfs_trace_folioq_read_progress, "r-progress") + #ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY #define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY @@ -209,6 +236,7 @@ enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte); enum netfs_folio_trace { netfs_folio_traces } __mode(byte); enum netfs_collect_contig_trace { netfs_collect_contig_traces } __mode(byte); enum netfs_donate_trace { netfs_donate_traces } __mode(byte); +enum netfs_folioq_trace { netfs_folioq_traces } __mode(byte); #endif @@ -232,6 +260,7 @@ netfs_sreq_ref_traces; netfs_folio_traces; netfs_collect_contig_traces; netfs_donate_traces; +netfs_folioq_traces; /* * Now redefine the EM() and E_() macros to map the enums to the strings that @@ -250,13 +279,13 @@ TRACE_EVENT(netfs_read, TP_ARGS(rreq, start, len, what), TP_STRUCT__entry( - __field(unsigned int, rreq ) - __field(unsigned int, cookie ) - __field(loff_t, i_size ) - __field(loff_t, start ) - __field(size_t, len ) - __field(enum netfs_read_trace, what ) - __field(unsigned int, netfs_inode ) + __field(unsigned int, rreq) + __field(unsigned int, cookie) + __field(loff_t, i_size) + __field(loff_t, start) + __field(size_t, len) + __field(enum netfs_read_trace, what) + __field(unsigned int, netfs_inode) ), TP_fast_assign( @@ -284,10 +313,10 @@ TRACE_EVENT(netfs_rreq, TP_ARGS(rreq, what), TP_STRUCT__entry( - __field(unsigned int, rreq ) - __field(unsigned int, flags ) - __field(enum netfs_io_origin, origin ) - __field(enum netfs_rreq_trace, what ) + __field(unsigned int, rreq) + __field(unsigned int, flags) + __field(enum netfs_io_origin, origin) + __field(enum netfs_rreq_trace, what) ), TP_fast_assign( @@ -311,15 +340,16 @@ TRACE_EVENT(netfs_sreq, TP_ARGS(sreq, what), TP_STRUCT__entry( - __field(unsigned int, rreq ) - __field(unsigned short, index ) - __field(short, error ) - __field(unsigned short, flags ) - __field(enum netfs_io_source, source ) - __field(enum netfs_sreq_trace, what ) - __field(size_t, len ) - __field(size_t, transferred ) - __field(loff_t, start ) + __field(unsigned int, rreq) + __field(unsigned short, index) + __field(short, error) + __field(unsigned short, flags) + __field(enum netfs_io_source, source) + __field(enum netfs_sreq_trace, what) + __field(u8, slot) + __field(size_t, len) + __field(size_t, transferred) + __field(loff_t, start) ), TP_fast_assign( @@ -332,15 +362,16 @@ TRACE_EVENT(netfs_sreq, __entry->len = sreq->len; __entry->transferred = sreq->transferred; __entry->start = sreq->start; + __entry->slot = sreq->io_iter.folioq_slot; ), - TP_printk("R=%08x[%x] %s %s f=%02x s=%llx %zx/%zx e=%d", + TP_printk("R=%08x[%x] %s %s f=%02x s=%llx %zx/%zx s=%u e=%d", __entry->rreq, __entry->index, __print_symbolic(__entry->source, netfs_sreq_sources), __print_symbolic(__entry->what, netfs_sreq_traces), __entry->flags, __entry->start, __entry->transferred, __entry->len, - __entry->error) + __entry->slot, __entry->error) ); TRACE_EVENT(netfs_failure, @@ -351,15 +382,15 @@ TRACE_EVENT(netfs_failure, TP_ARGS(rreq, sreq, error, what), TP_STRUCT__entry( - __field(unsigned int, rreq ) - __field(short, index ) - __field(short, error ) - __field(unsigned short, flags ) - __field(enum netfs_io_source, source ) - __field(enum netfs_failure, what ) - __field(size_t, len ) - 
__field(size_t, transferred ) - __field(loff_t, start ) + __field(unsigned int, rreq) + __field(short, index) + __field(short, error) + __field(unsigned short, flags) + __field(enum netfs_io_source, source) + __field(enum netfs_failure, what) + __field(size_t, len) + __field(size_t, transferred) + __field(loff_t, start) ), TP_fast_assign( @@ -390,9 +421,9 @@ TRACE_EVENT(netfs_rreq_ref, TP_ARGS(rreq_debug_id, ref, what), TP_STRUCT__entry( - __field(unsigned int, rreq ) - __field(int, ref ) - __field(enum netfs_rreq_ref_trace, what ) + __field(unsigned int, rreq) + __field(int, ref) + __field(enum netfs_rreq_ref_trace, what) ), TP_fast_assign( @@ -414,10 +445,10 @@ TRACE_EVENT(netfs_sreq_ref, TP_ARGS(rreq_debug_id, subreq_debug_index, ref, what), TP_STRUCT__entry( - __field(unsigned int, rreq ) - __field(unsigned int, subreq ) - __field(int, ref ) - __field(enum netfs_sreq_ref_trace, what ) + __field(unsigned int, rreq) + __field(unsigned int, subreq) + __field(int, ref) + __field(enum netfs_sreq_ref_trace, what) ), TP_fast_assign( @@ -465,10 +496,10 @@ TRACE_EVENT(netfs_write_iter, TP_ARGS(iocb, from), TP_STRUCT__entry( - __field(unsigned long long, start ) - __field(size_t, len ) - __field(unsigned int, flags ) - __field(unsigned int, ino ) + __field(unsigned long long, start) + __field(size_t, len) + __field(unsigned int, flags) + __field(unsigned int, ino) ), TP_fast_assign( @@ -489,12 +520,12 @@ TRACE_EVENT(netfs_write, TP_ARGS(wreq, what), TP_STRUCT__entry( - __field(unsigned int, wreq ) - __field(unsigned int, cookie ) - __field(unsigned int, ino ) - __field(enum netfs_write_trace, what ) - __field(unsigned long long, start ) - __field(unsigned long long, len ) + __field(unsigned int, wreq) + __field(unsigned int, cookie) + __field(unsigned int, ino) + __field(enum netfs_write_trace, what) + __field(unsigned long long, start) + __field(unsigned long long, len) ), TP_fast_assign( @@ -522,10 +553,10 @@ TRACE_EVENT(netfs_collect, TP_ARGS(wreq), TP_STRUCT__entry( - __field(unsigned int, wreq ) - __field(unsigned int, len ) - __field(unsigned long long, transferred ) - __field(unsigned long long, start ) + __field(unsigned int, wreq) + __field(unsigned int, len) + __field(unsigned long long, transferred) + __field(unsigned long long, start) ), TP_fast_assign( @@ -548,12 +579,12 @@ TRACE_EVENT(netfs_collect_sreq, TP_ARGS(wreq, subreq), TP_STRUCT__entry( - __field(unsigned int, wreq ) - __field(unsigned int, subreq ) - __field(unsigned int, stream ) - __field(unsigned int, len ) - __field(unsigned int, transferred ) - __field(unsigned long long, start ) + __field(unsigned int, wreq) + __field(unsigned int, subreq) + __field(unsigned int, stream) + __field(unsigned int, len) + __field(unsigned int, transferred) + __field(unsigned long long, start) ), TP_fast_assign( @@ -579,11 +610,11 @@ TRACE_EVENT(netfs_collect_folio, TP_ARGS(wreq, folio, fend, collected_to), TP_STRUCT__entry( - __field(unsigned int, wreq ) - __field(unsigned long, index ) - __field(unsigned long long, fend ) - __field(unsigned long long, cleaned_to ) - __field(unsigned long long, collected_to ) + __field(unsigned int, wreq) + __field(unsigned long, index) + __field(unsigned long long, fend) + __field(unsigned long long, cleaned_to) + __field(unsigned long long, collected_to) ), TP_fast_assign( @@ -608,10 +639,10 @@ TRACE_EVENT(netfs_collect_state, TP_ARGS(wreq, collected_to, notes), TP_STRUCT__entry( - __field(unsigned int, wreq ) - __field(unsigned int, notes ) - __field(unsigned long long, collected_to ) - 
__field(unsigned long long, cleaned_to ) + __field(unsigned int, wreq) + __field(unsigned int, notes) + __field(unsigned long long, collected_to) + __field(unsigned long long, cleaned_to) ), TP_fast_assign( @@ -680,69 +711,27 @@ TRACE_EVENT(netfs_collect_stream, __entry->collected_to, __entry->front) ); -TRACE_EVENT(netfs_progress, - TP_PROTO(const struct netfs_io_subrequest *subreq, - unsigned long long start, size_t avail, size_t part), +TRACE_EVENT(netfs_folioq, + TP_PROTO(const struct folio_queue *fq, + enum netfs_folioq_trace trace), - TP_ARGS(subreq, start, avail, part), + TP_ARGS(fq, trace), TP_STRUCT__entry( __field(unsigned int, rreq) - __field(unsigned int, subreq) - __field(unsigned int, consumed) - __field(unsigned int, transferred) - __field(unsigned long long, f_start) - __field(unsigned int, f_avail) - __field(unsigned int, f_part) - __field(unsigned char, slot) + __field(unsigned int, id) + __field(enum netfs_folioq_trace, trace) ), TP_fast_assign( - __entry->rreq = subreq->rreq->debug_id; - __entry->subreq = subreq->debug_index; - __entry->consumed = subreq->consumed; - __entry->transferred = subreq->transferred; - __entry->f_start = start; - __entry->f_avail = avail; - __entry->f_part = part; - __entry->slot = subreq->curr_folioq_slot; - ), - - TP_printk("R=%08x[%02x] s=%llx ct=%x/%x pa=%x/%x sl=%x", - __entry->rreq, __entry->subreq, __entry->f_start, - __entry->consumed, __entry->transferred, - __entry->f_part, __entry->f_avail, __entry->slot) - ); - -TRACE_EVENT(netfs_donate, - TP_PROTO(const struct netfs_io_request *rreq, - const struct netfs_io_subrequest *from, - const struct netfs_io_subrequest *to, - size_t amount, - enum netfs_donate_trace trace), - - TP_ARGS(rreq, from, to, amount, trace), - - TP_STRUCT__entry( - __field(unsigned int, rreq) - __field(unsigned int, from) - __field(unsigned int, to) - __field(unsigned int, amount) - __field(enum netfs_donate_trace, trace) - ), - - TP_fast_assign( - __entry->rreq = rreq->debug_id; - __entry->from = from->debug_index; - __entry->to = to ? to->debug_index : -1; - __entry->amount = amount; + __entry->rreq = fq ? fq->rreq_id : 0; + __entry->id = fq ? 
fq->debug_id : 0; __entry->trace = trace; ), - TP_printk("R=%08x[%02x] -> [%02x] %s am=%x", - __entry->rreq, __entry->from, __entry->to, - __print_symbolic(__entry->trace, netfs_donate_traces), - __entry->amount) + TP_printk("R=%08x fq=%x %s", + __entry->rreq, __entry->id, + __print_symbolic(__entry->trace, netfs_folioq_traces)) ); #undef EM diff --git a/include/trace/events/rv.h b/include/trace/events/rv.h deleted file mode 100644 index 56592da9301c..000000000000 --- a/include/trace/events/rv.h +++ /dev/null @@ -1,142 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM rv - -#if !defined(_TRACE_RV_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_RV_H - -#include <linux/rv.h> -#include <linux/tracepoint.h> - -#ifdef CONFIG_DA_MON_EVENTS_IMPLICIT -DECLARE_EVENT_CLASS(event_da_monitor, - - TP_PROTO(char *state, char *event, char *next_state, bool final_state), - - TP_ARGS(state, event, next_state, final_state), - - TP_STRUCT__entry( - __array( char, state, MAX_DA_NAME_LEN ) - __array( char, event, MAX_DA_NAME_LEN ) - __array( char, next_state, MAX_DA_NAME_LEN ) - __field( bool, final_state ) - ), - - TP_fast_assign( - memcpy(__entry->state, state, MAX_DA_NAME_LEN); - memcpy(__entry->event, event, MAX_DA_NAME_LEN); - memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN); - __entry->final_state = final_state; - ), - - TP_printk("%s x %s -> %s %s", - __entry->state, - __entry->event, - __entry->next_state, - __entry->final_state ? "(final)" : "") -); - -DECLARE_EVENT_CLASS(error_da_monitor, - - TP_PROTO(char *state, char *event), - - TP_ARGS(state, event), - - TP_STRUCT__entry( - __array( char, state, MAX_DA_NAME_LEN ) - __array( char, event, MAX_DA_NAME_LEN ) - ), - - TP_fast_assign( - memcpy(__entry->state, state, MAX_DA_NAME_LEN); - memcpy(__entry->event, event, MAX_DA_NAME_LEN); - ), - - TP_printk("event %s not expected in the state %s", - __entry->event, - __entry->state) -); - -#ifdef CONFIG_RV_MON_WIP -DEFINE_EVENT(event_da_monitor, event_wip, - TP_PROTO(char *state, char *event, char *next_state, bool final_state), - TP_ARGS(state, event, next_state, final_state)); - -DEFINE_EVENT(error_da_monitor, error_wip, - TP_PROTO(char *state, char *event), - TP_ARGS(state, event)); -#endif /* CONFIG_RV_MON_WIP */ -#endif /* CONFIG_DA_MON_EVENTS_IMPLICIT */ - -#ifdef CONFIG_DA_MON_EVENTS_ID -DECLARE_EVENT_CLASS(event_da_monitor_id, - - TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state), - - TP_ARGS(id, state, event, next_state, final_state), - - TP_STRUCT__entry( - __field( int, id ) - __array( char, state, MAX_DA_NAME_LEN ) - __array( char, event, MAX_DA_NAME_LEN ) - __array( char, next_state, MAX_DA_NAME_LEN ) - __field( bool, final_state ) - ), - - TP_fast_assign( - memcpy(__entry->state, state, MAX_DA_NAME_LEN); - memcpy(__entry->event, event, MAX_DA_NAME_LEN); - memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN); - __entry->id = id; - __entry->final_state = final_state; - ), - - TP_printk("%d: %s x %s -> %s %s", - __entry->id, - __entry->state, - __entry->event, - __entry->next_state, - __entry->final_state ? 
"(final)" : "") -); - -DECLARE_EVENT_CLASS(error_da_monitor_id, - - TP_PROTO(int id, char *state, char *event), - - TP_ARGS(id, state, event), - - TP_STRUCT__entry( - __field( int, id ) - __array( char, state, MAX_DA_NAME_LEN ) - __array( char, event, MAX_DA_NAME_LEN ) - ), - - TP_fast_assign( - memcpy(__entry->state, state, MAX_DA_NAME_LEN); - memcpy(__entry->event, event, MAX_DA_NAME_LEN); - __entry->id = id; - ), - - TP_printk("%d: event %s not expected in the state %s", - __entry->id, - __entry->event, - __entry->state) -); - -#ifdef CONFIG_RV_MON_WWNR -/* id is the pid of the task */ -DEFINE_EVENT(event_da_monitor_id, event_wwnr, - TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state), - TP_ARGS(id, state, event, next_state, final_state)); - -DEFINE_EVENT(error_da_monitor_id, error_wwnr, - TP_PROTO(int id, char *state, char *event), - TP_ARGS(id, state, event)); -#endif /* CONFIG_RV_MON_WWNR */ - -#endif /* CONFIG_DA_MON_EVENTS_ID */ -#endif /* _TRACE_RV_H */ - -/* This part ust be outside protection */ -#undef TRACE_INCLUDE_PATH -#include <trace/define_trace.h> diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index d03e0bd8c028..2f119d18a061 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -117,8 +117,10 @@ #define rxrpc_call_poke_traces \ EM(rxrpc_call_poke_abort, "Abort") \ EM(rxrpc_call_poke_complete, "Compl") \ + EM(rxrpc_call_poke_conn_abort, "Conn-abort") \ EM(rxrpc_call_poke_error, "Error") \ EM(rxrpc_call_poke_idle, "Idle") \ + EM(rxrpc_call_poke_rx_packet, "Rx-packet") \ EM(rxrpc_call_poke_set_timeout, "Set-timo") \ EM(rxrpc_call_poke_start, "Start") \ EM(rxrpc_call_poke_timer, "Timer") \ @@ -127,9 +129,9 @@ #define rxrpc_skb_traces \ EM(rxrpc_skb_eaten_by_unshare, "ETN unshare ") \ EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \ + EM(rxrpc_skb_get_call_rx, "GET call-rx ") \ EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \ EM(rxrpc_skb_get_conn_work, "GET conn-work") \ - EM(rxrpc_skb_get_last_nack, "GET last-nack") \ EM(rxrpc_skb_get_local_work, "GET locl-work") \ EM(rxrpc_skb_get_reject_work, "GET rej-work ") \ EM(rxrpc_skb_get_to_recvmsg, "GET to-recv ") \ @@ -138,12 +140,12 @@ EM(rxrpc_skb_new_error_report, "NEW error-rpt") \ EM(rxrpc_skb_new_jumbo_subpacket, "NEW jumbo-sub") \ EM(rxrpc_skb_new_unshared, "NEW unshared ") \ + EM(rxrpc_skb_put_call_rx, "PUT call-rx ") \ EM(rxrpc_skb_put_conn_secured, "PUT conn-secd") \ EM(rxrpc_skb_put_conn_work, "PUT conn-work") \ EM(rxrpc_skb_put_error_report, "PUT error-rep") \ EM(rxrpc_skb_put_input, "PUT input ") \ EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \ - EM(rxrpc_skb_put_last_nack, "PUT last-nack") \ EM(rxrpc_skb_put_purge, "PUT purge ") \ EM(rxrpc_skb_put_rotate, "PUT rotate ") \ EM(rxrpc_skb_put_unknown, "PUT unknown ") \ @@ -282,6 +284,7 @@ EM(rxrpc_call_see_activate_client, "SEE act-clnt") \ EM(rxrpc_call_see_connect_failed, "SEE con-fail") \ EM(rxrpc_call_see_connected, "SEE connect ") \ + EM(rxrpc_call_see_conn_abort, "SEE conn-abt") \ EM(rxrpc_call_see_disconnected, "SEE disconn ") \ EM(rxrpc_call_see_distribute_error, "SEE dist-err") \ EM(rxrpc_call_see_input, "SEE input ") \ @@ -292,7 +295,6 @@ #define rxrpc_txqueue_traces \ EM(rxrpc_txqueue_await_reply, "AWR") \ - EM(rxrpc_txqueue_dequeue, "DEQ") \ EM(rxrpc_txqueue_end, "END") \ EM(rxrpc_txqueue_queue, "QUE") \ EM(rxrpc_txqueue_queue_last, "QLS") \ @@ -300,6 +302,13 @@ EM(rxrpc_txqueue_rotate_last, "RLS") \ E_(rxrpc_txqueue_wait, "WAI") +#define rxrpc_txdata_traces \ + 
EM(rxrpc_txdata_inject_loss, " *INJ-LOSS*") \ + EM(rxrpc_txdata_new_data, " ") \ + EM(rxrpc_txdata_retransmit, " *RETRANS*") \ + EM(rxrpc_txdata_tlp_new_data, " *TLP-NEW*") \ + E_(rxrpc_txdata_tlp_retransmit, " *TLP-RETRANS*") + #define rxrpc_receive_traces \ EM(rxrpc_receive_end, "END") \ EM(rxrpc_receive_front, "FRN") \ @@ -335,11 +344,10 @@ E_(rxrpc_rtt_tx_ping, "PING") #define rxrpc_rtt_rx_traces \ - EM(rxrpc_rtt_rx_other_ack, "OACK") \ + EM(rxrpc_rtt_rx_data_ack, "DACK") \ EM(rxrpc_rtt_rx_obsolete, "OBSL") \ EM(rxrpc_rtt_rx_lost, "LOST") \ - EM(rxrpc_rtt_rx_ping_response, "PONG") \ - E_(rxrpc_rtt_rx_requested_ack, "RACK") + E_(rxrpc_rtt_rx_ping_response, "PONG") #define rxrpc_timer_traces \ EM(rxrpc_timer_trace_delayed_ack, "DelayAck ") \ @@ -347,11 +355,12 @@ EM(rxrpc_timer_trace_hard, "HardLimit") \ EM(rxrpc_timer_trace_idle, "IdleLimit") \ EM(rxrpc_timer_trace_keepalive, "KeepAlive") \ - EM(rxrpc_timer_trace_lost_ack, "LostAck ") \ EM(rxrpc_timer_trace_ping, "DelayPing") \ - EM(rxrpc_timer_trace_resend, "Resend ") \ - EM(rxrpc_timer_trace_resend_reset, "ResendRst") \ - E_(rxrpc_timer_trace_resend_tx, "ResendTx ") + EM(rxrpc_timer_trace_rack_off, "RACK-OFF ") \ + EM(rxrpc_timer_trace_rack_zwp, "RACK-ZWP ") \ + EM(rxrpc_timer_trace_rack_reo, "RACK-Reo ") \ + EM(rxrpc_timer_trace_rack_tlp_pto, "TLP-PTO ") \ + E_(rxrpc_timer_trace_rack_rto, "RTO ") #define rxrpc_propose_ack_traces \ EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \ @@ -362,22 +371,24 @@ EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \ EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \ EM(rxrpc_propose_ack_ping_for_0_retrans, "0-Retrn") \ + EM(rxrpc_propose_ack_ping_for_mtu_probe, "MTUProb") \ EM(rxrpc_propose_ack_ping_for_old_rtt, "OldRtt ") \ EM(rxrpc_propose_ack_ping_for_params, "Params ") \ EM(rxrpc_propose_ack_ping_for_rtt, "Rtt ") \ EM(rxrpc_propose_ack_processing_op, "ProcOp ") \ EM(rxrpc_propose_ack_respond_to_ack, "Rsp2Ack") \ EM(rxrpc_propose_ack_respond_to_ping, "Rsp2Png") \ + EM(rxrpc_propose_ack_retransmit, "Retrans") \ EM(rxrpc_propose_ack_retry_tx, "RetryTx") \ EM(rxrpc_propose_ack_rotate_rx, "RxAck ") \ EM(rxrpc_propose_ack_rx_idle, "RxIdle ") \ E_(rxrpc_propose_ack_terminal_ack, "ClTerm ") -#define rxrpc_congest_modes \ - EM(RXRPC_CALL_CONGEST_AVOIDANCE, "CongAvoid") \ - EM(RXRPC_CALL_FAST_RETRANSMIT, "FastReTx ") \ - EM(RXRPC_CALL_PACKET_LOSS, "PktLoss ") \ - E_(RXRPC_CALL_SLOW_START, "SlowStart") +#define rxrpc_ca_states \ + EM(RXRPC_CA_CONGEST_AVOIDANCE, "CongAvoid") \ + EM(RXRPC_CA_FAST_RETRANSMIT, "FastReTx ") \ + EM(RXRPC_CA_PACKET_LOSS, "PktLoss ") \ + E_(RXRPC_CA_SLOW_START, "SlowStart") #define rxrpc_congest_changes \ EM(rxrpc_cong_begin_retransmission, " Retrans") \ @@ -450,7 +461,7 @@ #define rxrpc_req_ack_traces \ EM(rxrpc_reqack_ack_lost, "ACK-LOST ") \ - EM(rxrpc_reqack_already_on, "ALREADY-ON") \ + EM(rxrpc_reqack_app_stall, "APP-STALL ") \ EM(rxrpc_reqack_more_rtt, "MORE-RTT ") \ EM(rxrpc_reqack_no_srv_last, "NO-SRVLAST") \ EM(rxrpc_reqack_old_rtt, "OLD-RTT ") \ @@ -460,21 +471,60 @@ /* ---- Must update size of stat_why_req_ack[] if more are added! 
*/ #define rxrpc_txbuf_traces \ - EM(rxrpc_txbuf_alloc_ack, "ALLOC ACK ") \ EM(rxrpc_txbuf_alloc_data, "ALLOC DATA ") \ EM(rxrpc_txbuf_free, "FREE ") \ EM(rxrpc_txbuf_get_buffer, "GET BUFFER ") \ EM(rxrpc_txbuf_get_trans, "GET TRANS ") \ EM(rxrpc_txbuf_get_retrans, "GET RETRANS") \ - EM(rxrpc_txbuf_put_ack_tx, "PUT ACK TX ") \ EM(rxrpc_txbuf_put_cleaned, "PUT CLEANED") \ EM(rxrpc_txbuf_put_nomem, "PUT NOMEM ") \ EM(rxrpc_txbuf_put_rotated, "PUT ROTATED") \ EM(rxrpc_txbuf_put_send_aborted, "PUT SEND-X ") \ EM(rxrpc_txbuf_put_trans, "PUT TRANS ") \ + EM(rxrpc_txbuf_see_lost, "SEE LOST ") \ EM(rxrpc_txbuf_see_out_of_step, "OUT-OF-STEP") \ - EM(rxrpc_txbuf_see_send_more, "SEE SEND+ ") \ - E_(rxrpc_txbuf_see_unacked, "SEE UNACKED") + E_(rxrpc_txbuf_see_send_more, "SEE SEND+ ") + +#define rxrpc_tq_traces \ + EM(rxrpc_tq_alloc, "ALLOC") \ + EM(rxrpc_tq_cleaned, "CLEAN") \ + EM(rxrpc_tq_decant, "DCNT ") \ + EM(rxrpc_tq_decant_advance, "DCNT>") \ + EM(rxrpc_tq_queue, "QUEUE") \ + EM(rxrpc_tq_queue_dup, "QUE!!") \ + EM(rxrpc_tq_rotate, "ROT ") \ + EM(rxrpc_tq_rotate_and_free, "ROT-F") \ + EM(rxrpc_tq_rotate_and_keep, "ROT-K") \ + EM(rxrpc_tq_transmit, "XMIT ") \ + E_(rxrpc_tq_transmit_advance, "XMIT>") + +#define rxrpc_pmtud_reduce_traces \ + EM(rxrpc_pmtud_reduce_ack, "Ack ") \ + EM(rxrpc_pmtud_reduce_icmp, "Icmp ") \ + E_(rxrpc_pmtud_reduce_route, "Route") + +#define rxrpc_rotate_traces \ + EM(rxrpc_rotate_trace_hack, "hard-ack") \ + EM(rxrpc_rotate_trace_sack, "soft-ack") \ + E_(rxrpc_rotate_trace_snak, "soft-nack") + +#define rxrpc_rack_timer_modes \ + EM(RXRPC_CALL_RACKTIMER_OFF, "---") \ + EM(RXRPC_CALL_RACKTIMER_RACK_REORDER, "REO") \ + EM(RXRPC_CALL_RACKTIMER_TLP_PTO, "TLP") \ + E_(RXRPC_CALL_RACKTIMER_RTO, "RTO") + +#define rxrpc_tlp_probe_traces \ + EM(rxrpc_tlp_probe_trace_busy, "busy") \ + EM(rxrpc_tlp_probe_trace_transmit_new, "transmit-new") \ + E_(rxrpc_tlp_probe_trace_retransmit, "retransmit") + +#define rxrpc_tlp_ack_traces \ + EM(rxrpc_tlp_ack_trace_acked, "acked") \ + EM(rxrpc_tlp_ack_trace_dup_acked, "dup-acked") \ + EM(rxrpc_tlp_ack_trace_hard_beyond, "hard-beyond") \ + EM(rxrpc_tlp_ack_trace_incomplete, "incomplete") \ + E_(rxrpc_tlp_ack_trace_new_data, "new-data") /* * Generate enums for tracing information. 
@@ -496,18 +546,24 @@ enum rxrpc_congest_change { rxrpc_congest_changes } __mode(byte); enum rxrpc_conn_trace { rxrpc_conn_traces } __mode(byte); enum rxrpc_local_trace { rxrpc_local_traces } __mode(byte); enum rxrpc_peer_trace { rxrpc_peer_traces } __mode(byte); +enum rxrpc_pmtud_reduce_trace { rxrpc_pmtud_reduce_traces } __mode(byte); enum rxrpc_propose_ack_outcome { rxrpc_propose_ack_outcomes } __mode(byte); enum rxrpc_propose_ack_trace { rxrpc_propose_ack_traces } __mode(byte); enum rxrpc_receive_trace { rxrpc_receive_traces } __mode(byte); enum rxrpc_recvmsg_trace { rxrpc_recvmsg_traces } __mode(byte); enum rxrpc_req_ack_trace { rxrpc_req_ack_traces } __mode(byte); +enum rxrpc_rotate_trace { rxrpc_rotate_traces } __mode(byte); enum rxrpc_rtt_rx_trace { rxrpc_rtt_rx_traces } __mode(byte); enum rxrpc_rtt_tx_trace { rxrpc_rtt_tx_traces } __mode(byte); enum rxrpc_sack_trace { rxrpc_sack_traces } __mode(byte); enum rxrpc_skb_trace { rxrpc_skb_traces } __mode(byte); enum rxrpc_timer_trace { rxrpc_timer_traces } __mode(byte); +enum rxrpc_tlp_ack_trace { rxrpc_tlp_ack_traces } __mode(byte); +enum rxrpc_tlp_probe_trace { rxrpc_tlp_probe_traces } __mode(byte); +enum rxrpc_tq_trace { rxrpc_tq_traces } __mode(byte); enum rxrpc_tx_point { rxrpc_tx_points } __mode(byte); enum rxrpc_txbuf_trace { rxrpc_txbuf_traces } __mode(byte); +enum rxrpc_txdata_trace { rxrpc_txdata_traces } __mode(byte); enum rxrpc_txqueue_trace { rxrpc_txqueue_traces } __mode(byte); #endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */ @@ -525,24 +581,31 @@ enum rxrpc_txqueue_trace { rxrpc_txqueue_traces } __mode(byte); rxrpc_abort_reasons; rxrpc_bundle_traces; +rxrpc_ca_states; rxrpc_call_poke_traces; rxrpc_call_traces; rxrpc_client_traces; rxrpc_congest_changes; -rxrpc_congest_modes; rxrpc_conn_traces; rxrpc_local_traces; +rxrpc_pmtud_reduce_traces; rxrpc_propose_ack_traces; +rxrpc_rack_timer_modes; rxrpc_receive_traces; rxrpc_recvmsg_traces; rxrpc_req_ack_traces; +rxrpc_rotate_traces; rxrpc_rtt_rx_traces; rxrpc_rtt_tx_traces; rxrpc_sack_traces; rxrpc_skb_traces; rxrpc_timer_traces; +rxrpc_tlp_ack_traces; +rxrpc_tlp_probe_traces; +rxrpc_tq_traces; rxrpc_tx_points; rxrpc_txbuf_traces; +rxrpc_txdata_traces; rxrpc_txqueue_traces; /* @@ -581,6 +644,20 @@ TRACE_EVENT(rxrpc_local, __entry->usage) ); +TRACE_EVENT(rxrpc_iothread_rx, + TP_PROTO(struct rxrpc_local *local, unsigned int nr_rx), + TP_ARGS(local, nr_rx), + TP_STRUCT__entry( + __field(unsigned int, local) + __field(unsigned int, nr_rx) + ), + TP_fast_assign( + __entry->local = local->debug_id; + __entry->nr_rx = nr_rx; + ), + TP_printk("L=%08x nrx=%u", __entry->local, __entry->nr_rx) + ); + TRACE_EVENT(rxrpc_peer, TP_PROTO(unsigned int peer_debug_id, int ref, enum rxrpc_peer_trace why), @@ -865,34 +942,101 @@ TRACE_EVENT(rxrpc_txqueue, TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_txqueue_trace, why) - __field(rxrpc_seq_t, acks_hard_ack) __field(rxrpc_seq_t, tx_bottom) + __field(rxrpc_seq_t, acks_hard_ack) __field(rxrpc_seq_t, tx_top) - __field(rxrpc_seq_t, tx_prepared) + __field(rxrpc_seq_t, send_top) __field(int, tx_winsize) ), TP_fast_assign( __entry->call = call->debug_id; __entry->why = why; - __entry->acks_hard_ack = call->acks_hard_ack; __entry->tx_bottom = call->tx_bottom; + __entry->acks_hard_ack = call->acks_hard_ack; __entry->tx_top = call->tx_top; - __entry->tx_prepared = call->tx_prepared; + __entry->send_top = call->send_top; __entry->tx_winsize = call->tx_winsize; ), - TP_printk("c=%08x %s f=%08x h=%08x n=%u/%u/%u/%u", + TP_printk("c=%08x 
%s b=%08x h=%08x n=%u/%u/%u/%u", __entry->call, __print_symbolic(__entry->why, rxrpc_txqueue_traces), __entry->tx_bottom, __entry->acks_hard_ack, - __entry->tx_top - __entry->tx_bottom, + __entry->acks_hard_ack - __entry->tx_bottom, __entry->tx_top - __entry->acks_hard_ack, - __entry->tx_prepared - __entry->tx_bottom, + __entry->send_top - __entry->tx_top, __entry->tx_winsize) ); +TRACE_EVENT(rxrpc_transmit, + TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t send_top, int space), + + TP_ARGS(call, send_top, space), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(rxrpc_seq_t, seq) + __field(u16, space) + __field(u16, tx_winsize) + __field(u16, cong_cwnd) + __field(u16, cong_extra) + __field(u16, in_flight) + __field(u16, prepared) + __field(u16, pmtud_jumbo) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->seq = call->tx_top + 1; + __entry->space = space; + __entry->tx_winsize = call->tx_winsize; + __entry->cong_cwnd = call->cong_cwnd; + __entry->cong_extra = call->cong_extra; + __entry->prepared = send_top - call->tx_bottom; + __entry->in_flight = call->tx_top - call->tx_bottom; + __entry->pmtud_jumbo = call->peer->pmtud_jumbo; + ), + + TP_printk("c=%08x q=%08x sp=%u tw=%u cw=%u+%u pr=%u if=%u pj=%u", + __entry->call, + __entry->seq, + __entry->space, + __entry->tx_winsize, + __entry->cong_cwnd, + __entry->cong_extra, + __entry->prepared, + __entry->in_flight, + __entry->pmtud_jumbo) + ); + +TRACE_EVENT(rxrpc_tx_rotate, + TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, rxrpc_seq_t to), + + TP_ARGS(call, seq, to), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(rxrpc_seq_t, seq) + __field(rxrpc_seq_t, to) + __field(rxrpc_seq_t, top) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->seq = seq; + __entry->to = to; + __entry->top = call->tx_top; + ), + + TP_printk("c=%08x q=%08x-%08x-%08x", + __entry->call, + __entry->seq, + __entry->to, + __entry->top) + ); + TRACE_EVENT(rxrpc_rx_data, TP_PROTO(unsigned int call, rxrpc_seq_t seq, rxrpc_serial_t serial, u8 flags), @@ -921,11 +1065,9 @@ TRACE_EVENT(rxrpc_rx_data, ); TRACE_EVENT(rxrpc_rx_ack, - TP_PROTO(struct rxrpc_call *call, - rxrpc_serial_t serial, rxrpc_serial_t ack_serial, - rxrpc_seq_t first, rxrpc_seq_t prev, u8 reason, u8 n_acks), + TP_PROTO(struct rxrpc_call *call, struct rxrpc_skb_priv *sp), - TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks), + TP_ARGS(call, sp), TP_STRUCT__entry( __field(unsigned int, call) @@ -935,23 +1077,26 @@ TRACE_EVENT(rxrpc_rx_ack, __field(rxrpc_seq_t, prev) __field(u8, reason) __field(u8, n_acks) + __field(u8, user_status) ), TP_fast_assign( - __entry->call = call->debug_id; - __entry->serial = serial; - __entry->ack_serial = ack_serial; - __entry->first = first; - __entry->prev = prev; - __entry->reason = reason; - __entry->n_acks = n_acks; + __entry->call = call->debug_id; + __entry->serial = sp->hdr.serial; + __entry->user_status = sp->hdr.userStatus; + __entry->ack_serial = sp->ack.acked_serial; + __entry->first = sp->ack.first_ack; + __entry->prev = sp->ack.prev_ack; + __entry->reason = sp->ack.reason; + __entry->n_acks = sp->ack.nr_acks; ), - TP_printk("c=%08x %08x %s r=%08x f=%08x p=%08x n=%u", + TP_printk("c=%08x %08x %s r=%08x us=%02x f=%08x p=%08x n=%u", __entry->call, __entry->serial, __print_symbolic(__entry->reason, rxrpc_ack_names), __entry->ack_serial, + __entry->user_status, __entry->first, __entry->prev, __entry->n_acks) @@ -981,6 +1126,29 @@ TRACE_EVENT(rxrpc_rx_abort, __entry->abort_code) ); 
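Each TRACE_EVENT() in this header expands into a static-inline emitter, trace_<name>(), whose prototype is the TP_PROTO() list; TP_STRUCT__entry() lays out the binary trace record and TP_printk() gives its rendered form. A minimal sketch of a call site for the rxrpc_rx_conn_abort event added just below (the wrapper function is hypothetical and purely illustrative; only the trace_rxrpc_rx_conn_abort() emitter is generated from this header):

/* Hypothetical call site: the rxrpc input path would emit this event when
 * an ABORT packet is received on a connection.  The call is patched out
 * via a static key unless the event has been enabled, e.g. with
 * "echo 1 > /sys/kernel/tracing/events/rxrpc/rxrpc_rx_conn_abort/enable".
 */
static void example_input_conn_abort(const struct rxrpc_connection *conn,
				     const struct sk_buff *skb)
{
	trace_rxrpc_rx_conn_abort(conn, skb);
}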
+TRACE_EVENT(rxrpc_rx_conn_abort, + TP_PROTO(const struct rxrpc_connection *conn, const struct sk_buff *skb), + + TP_ARGS(conn, skb), + + TP_STRUCT__entry( + __field(unsigned int, conn) + __field(rxrpc_serial_t, serial) + __field(u32, abort_code) + ), + + TP_fast_assign( + __entry->conn = conn->debug_id; + __entry->serial = rxrpc_skb(skb)->hdr.serial; + __entry->abort_code = skb->priority; + ), + + TP_printk("C=%08x ABORT %08x ac=%d", + __entry->conn, + __entry->serial, + __entry->abort_code) + ); + TRACE_EVENT(rxrpc_rx_challenge, TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial, u32 version, u32 nonce, u32 min_level), @@ -1102,9 +1270,10 @@ TRACE_EVENT(rxrpc_tx_packet, TRACE_EVENT(rxrpc_tx_data, TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, - rxrpc_serial_t serial, unsigned int flags, bool lose), + rxrpc_serial_t serial, unsigned int flags, + enum rxrpc_txdata_trace trace), - TP_ARGS(call, seq, serial, flags, lose), + TP_ARGS(call, seq, serial, flags, trace), TP_STRUCT__entry( __field(unsigned int, call) @@ -1113,7 +1282,7 @@ TRACE_EVENT(rxrpc_tx_data, __field(u32, cid) __field(u32, call_id) __field(u16, flags) - __field(bool, lose) + __field(enum rxrpc_txdata_trace, trace) ), TP_fast_assign( @@ -1123,26 +1292,26 @@ TRACE_EVENT(rxrpc_tx_data, __entry->seq = seq; __entry->serial = serial; __entry->flags = flags; - __entry->lose = lose; + __entry->trace = trace; ), - TP_printk("c=%08x DATA %08x:%08x %08x q=%08x fl=%02x%s%s", + TP_printk("c=%08x DATA %08x:%08x %08x q=%08x fl=%02x%s", __entry->call, __entry->cid, __entry->call_id, __entry->serial, __entry->seq, __entry->flags & RXRPC_TXBUF_WIRE_FLAGS, - __entry->flags & RXRPC_TXBUF_RESENT ? " *RETRANS*" : "", - __entry->lose ? " *LOSE*" : "") + __print_symbolic(__entry->trace, rxrpc_txdata_traces)) ); TRACE_EVENT(rxrpc_tx_ack, TP_PROTO(unsigned int call, rxrpc_serial_t serial, rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial, - u8 reason, u8 n_acks, u16 rwind), + u8 reason, u8 n_acks, u16 rwind, + enum rxrpc_propose_ack_trace trace), - TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks, rwind), + TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks, rwind, trace), TP_STRUCT__entry( __field(unsigned int, call) @@ -1152,6 +1321,7 @@ TRACE_EVENT(rxrpc_tx_ack, __field(u8, reason) __field(u8, n_acks) __field(u16, rwind) + __field(enum rxrpc_propose_ack_trace, trace) ), TP_fast_assign( @@ -1162,16 +1332,18 @@ TRACE_EVENT(rxrpc_tx_ack, __entry->reason = reason; __entry->n_acks = n_acks; __entry->rwind = rwind; + __entry->trace = trace; ), - TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u rw=%u", + TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u rw=%u %s", __entry->call, __entry->serial, __print_symbolic(__entry->reason, rxrpc_ack_names), __entry->ack_first, __entry->ack_serial, __entry->n_acks, - __entry->rwind) + __entry->rwind, + __print_symbolic(__entry->trace, rxrpc_propose_ack_traces)) ); TRACE_EVENT(rxrpc_receive, @@ -1296,9 +1468,9 @@ TRACE_EVENT(rxrpc_rtt_rx, TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, int slot, rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, - u32 rtt, u32 rto), + u32 rtt, u32 srtt, u32 rto), - TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, rto), + TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, srtt, rto), TP_STRUCT__entry( __field(unsigned int, call) @@ -1307,7 +1479,9 @@ TRACE_EVENT(rxrpc_rtt_rx, __field(rxrpc_serial_t, send_serial) __field(rxrpc_serial_t, resp_serial) __field(u32, rtt) + __field(u32, srtt) __field(u32, rto) + __field(u32, 
min_rtt) ), TP_fast_assign( @@ -1317,17 +1491,21 @@ TRACE_EVENT(rxrpc_rtt_rx, __entry->send_serial = send_serial; __entry->resp_serial = resp_serial; __entry->rtt = rtt; + __entry->srtt = srtt; __entry->rto = rto; + __entry->min_rtt = minmax_get(&call->min_rtt) ), - TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u rto=%u", + TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u srtt=%u rto=%u min=%u", __entry->call, __entry->slot, __print_symbolic(__entry->why, rxrpc_rtt_rx_traces), __entry->send_serial, __entry->resp_serial, __entry->rtt, - __entry->rto) + __entry->srtt / 8, + __entry->rto, + __entry->min_rtt) ); TRACE_EVENT(rxrpc_timer_set, @@ -1544,112 +1722,125 @@ TRACE_EVENT(rxrpc_drop_ack, ); TRACE_EVENT(rxrpc_retransmit, - TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, - rxrpc_serial_t serial, ktime_t expiry), + TP_PROTO(struct rxrpc_call *call, + struct rxrpc_send_data_req *req, + struct rxrpc_txbuf *txb), - TP_ARGS(call, seq, serial, expiry), + TP_ARGS(call, req, txb), TP_STRUCT__entry( __field(unsigned int, call) + __field(unsigned int, qbase) __field(rxrpc_seq_t, seq) __field(rxrpc_serial_t, serial) - __field(ktime_t, expiry) ), TP_fast_assign( __entry->call = call->debug_id; - __entry->seq = seq; - __entry->serial = serial; - __entry->expiry = expiry; + __entry->qbase = req->tq->qbase; + __entry->seq = req->seq; + __entry->serial = txb->serial; ), - TP_printk("c=%08x q=%x r=%x xp=%lld", + TP_printk("c=%08x tq=%x q=%x r=%x", __entry->call, + __entry->qbase, __entry->seq, - __entry->serial, - ktime_to_us(__entry->expiry)) + __entry->serial) ); TRACE_EVENT(rxrpc_congest, - TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary, - rxrpc_serial_t ack_serial, enum rxrpc_congest_change change), + TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary), - TP_ARGS(call, summary, ack_serial, change), + TP_ARGS(call, summary), TP_STRUCT__entry( __field(unsigned int, call) - __field(enum rxrpc_congest_change, change) + __field(enum rxrpc_ca_state, ca_state) __field(rxrpc_seq_t, hard_ack) __field(rxrpc_seq_t, top) __field(rxrpc_seq_t, lowest_nak) - __field(rxrpc_serial_t, ack_serial) + __field(u16, nr_sacks) + __field(u16, nr_snacks) + __field(u16, cwnd) + __field(u16, ssthresh) + __field(u16, cumul_acks) + __field(u16, dup_acks) __field_struct(struct rxrpc_ack_summary, sum) ), TP_fast_assign( __entry->call = call->debug_id; - __entry->change = change; + __entry->ca_state = call->cong_ca_state; __entry->hard_ack = call->acks_hard_ack; __entry->top = call->tx_top; __entry->lowest_nak = call->acks_lowest_nak; - __entry->ack_serial = ack_serial; + __entry->nr_sacks = call->acks_nr_sacks; + __entry->nr_snacks = call->acks_nr_snacks; + __entry->cwnd = call->cong_cwnd; + __entry->ssthresh = call->cong_ssthresh; + __entry->cumul_acks = call->cong_cumul_acks; + __entry->dup_acks = call->cong_dup_acks; memcpy(&__entry->sum, summary, sizeof(__entry->sum)); ), - TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u,%u b=%u u=%u d=%u l=%x%s%s%s", + TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u A=%u+%u/%u+%u r=%u b=%u u=%u d=%u l=%x%s%s%s", __entry->call, - __entry->ack_serial, + __entry->sum.acked_serial, __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names), __entry->hard_ack, - __print_symbolic(__entry->sum.mode, rxrpc_congest_modes), - __entry->sum.cwnd, - __entry->sum.ssthresh, - __entry->sum.nr_acks, __entry->sum.nr_retained_nacks, - __entry->sum.nr_new_acks, - __entry->sum.nr_new_nacks, + __print_symbolic(__entry->ca_state, rxrpc_ca_states), + 
__entry->cwnd, + __entry->ssthresh, + __entry->nr_sacks, __entry->sum.nr_new_sacks, + __entry->nr_snacks, __entry->sum.nr_new_snacks, + __entry->sum.nr_new_hacks, __entry->top - __entry->hard_ack, - __entry->sum.cumulative_acks, - __entry->sum.dup_acks, - __entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "", - __print_symbolic(__entry->change, rxrpc_congest_changes), + __entry->cumul_acks, + __entry->dup_acks, + __entry->lowest_nak, __entry->sum.new_low_snack ? "!" : "", + __print_symbolic(__entry->sum.change, rxrpc_congest_changes), __entry->sum.retrans_timeo ? " rTxTo" : "") ); TRACE_EVENT(rxrpc_reset_cwnd, - TP_PROTO(struct rxrpc_call *call, ktime_t now), + TP_PROTO(struct rxrpc_call *call, ktime_t since_last_tx, ktime_t rtt), - TP_ARGS(call, now), + TP_ARGS(call, since_last_tx, rtt), TP_STRUCT__entry( __field(unsigned int, call) - __field(enum rxrpc_congest_mode, mode) + __field(enum rxrpc_ca_state, ca_state) __field(unsigned short, cwnd) __field(unsigned short, extra) __field(rxrpc_seq_t, hard_ack) __field(rxrpc_seq_t, prepared) __field(ktime_t, since_last_tx) + __field(ktime_t, rtt) __field(bool, has_data) ), TP_fast_assign( __entry->call = call->debug_id; - __entry->mode = call->cong_mode; + __entry->ca_state = call->cong_ca_state; __entry->cwnd = call->cong_cwnd; __entry->extra = call->cong_extra; __entry->hard_ack = call->acks_hard_ack; - __entry->prepared = call->tx_prepared - call->tx_bottom; - __entry->since_last_tx = ktime_sub(now, call->tx_last_sent); - __entry->has_data = !list_empty(&call->tx_sendmsg); + __entry->prepared = call->send_top - call->tx_bottom; + __entry->since_last_tx = since_last_tx; + __entry->rtt = rtt; + __entry->has_data = call->tx_bottom != call->tx_top; ), - TP_printk("c=%08x q=%08x %s cw=%u+%u pr=%u tm=%llu d=%u", + TP_printk("c=%08x q=%08x %s cw=%u+%u pr=%u tm=%llu/%llu d=%u", __entry->call, __entry->hard_ack, - __print_symbolic(__entry->mode, rxrpc_congest_modes), + __print_symbolic(__entry->ca_state, rxrpc_ca_states), __entry->cwnd, __entry->extra, __entry->prepared, - ktime_to_ns(__entry->since_last_tx), + ktime_to_us(__entry->since_last_tx), + ktime_to_us(__entry->rtt), __entry->has_data) ); @@ -1722,10 +1913,36 @@ TRACE_EVENT(rxrpc_connect_call, &__entry->srx.transport) ); +TRACE_EVENT(rxrpc_apply_acks, + TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq), + + TP_ARGS(call, tq), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(unsigned int, nr_rep) + __field(rxrpc_seq_t, qbase) + __field(unsigned long, acks) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->qbase = tq->qbase; + __entry->acks = tq->segment_acked; + __entry->nr_rep = tq->nr_reported_acks; + ), + + TP_printk("c=%08x tq=%x acks=%016lx rep=%u", + __entry->call, + __entry->qbase, + __entry->acks, + __entry->nr_rep) + ); + TRACE_EVENT(rxrpc_resend, - TP_PROTO(struct rxrpc_call *call, struct sk_buff *ack), + TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t ack_serial), - TP_ARGS(call, ack), + TP_ARGS(call, ack_serial), TP_STRUCT__entry( __field(unsigned int, call) @@ -1735,11 +1952,10 @@ TRACE_EVENT(rxrpc_resend, ), TP_fast_assign( - struct rxrpc_skb_priv *sp = ack ? rxrpc_skb(ack) : NULL; __entry->call = call->debug_id; __entry->seq = call->acks_hard_ack; __entry->transmitted = call->tx_transmitted; - __entry->ack_serial = sp ? 
sp->hdr.serial : 0; + __entry->ack_serial = ack_serial; ), TP_printk("c=%08x r=%x q=%x tq=%x", @@ -1749,6 +1965,63 @@ TRACE_EVENT(rxrpc_resend, __entry->transmitted) ); +TRACE_EVENT(rxrpc_resend_lost, + TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq, unsigned long lost), + + TP_ARGS(call, tq, lost), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(rxrpc_seq_t, qbase) + __field(u8, nr_rep) + __field(unsigned long, lost) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->qbase = tq->qbase; + __entry->nr_rep = tq->nr_reported_acks; + __entry->lost = lost; + ), + + TP_printk("c=%08x tq=%x lost=%016lx nr=%u", + __entry->call, + __entry->qbase, + __entry->lost, + __entry->nr_rep) + ); + +TRACE_EVENT(rxrpc_rotate, + TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq, + struct rxrpc_ack_summary *summary, rxrpc_seq_t seq, + enum rxrpc_rotate_trace trace), + + TP_ARGS(call, tq, summary, seq, trace), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(rxrpc_seq_t, qbase) + __field(rxrpc_seq_t, seq) + __field(unsigned int, nr_rep) + __field(enum rxrpc_rotate_trace, trace) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->qbase = tq->qbase; + __entry->seq = seq; + __entry->nr_rep = tq->nr_reported_acks; + __entry->trace = trace; + ), + + TP_printk("c=%08x tq=%x q=%x nr=%x %s", + __entry->call, + __entry->qbase, + __entry->seq, + __entry->nr_rep, + __print_symbolic(__entry->trace, rxrpc_rotate_traces)) + ); + TRACE_EVENT(rxrpc_rx_icmp, TP_PROTO(struct rxrpc_peer *peer, struct sock_extended_err *ee, struct sockaddr_rxrpc *srx), @@ -1858,38 +2131,36 @@ TRACE_EVENT(rxrpc_notify_socket, ); TRACE_EVENT(rxrpc_rx_discard_ack, - TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, - rxrpc_seq_t first_soft_ack, rxrpc_seq_t call_ackr_first, - rxrpc_seq_t prev_pkt, rxrpc_seq_t call_ackr_prev), + TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial, + rxrpc_seq_t hard_ack, rxrpc_seq_t prev_pkt), - TP_ARGS(debug_id, serial, first_soft_ack, call_ackr_first, - prev_pkt, call_ackr_prev), + TP_ARGS(call, serial, hard_ack, prev_pkt), TP_STRUCT__entry( __field(unsigned int, debug_id) __field(rxrpc_serial_t, serial) - __field(rxrpc_seq_t, first_soft_ack) - __field(rxrpc_seq_t, call_ackr_first) + __field(rxrpc_seq_t, hard_ack) __field(rxrpc_seq_t, prev_pkt) - __field(rxrpc_seq_t, call_ackr_prev) + __field(rxrpc_seq_t, acks_hard_ack) + __field(rxrpc_seq_t, acks_prev_seq) ), TP_fast_assign( - __entry->debug_id = debug_id; + __entry->debug_id = call->debug_id; __entry->serial = serial; - __entry->first_soft_ack = first_soft_ack; - __entry->call_ackr_first = call_ackr_first; + __entry->hard_ack = hard_ack; __entry->prev_pkt = prev_pkt; - __entry->call_ackr_prev = call_ackr_prev; + __entry->acks_hard_ack = call->acks_hard_ack; + __entry->acks_prev_seq = call->acks_prev_seq; ), TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x", __entry->debug_id, __entry->serial, - __entry->first_soft_ack, - __entry->call_ackr_first, + __entry->hard_ack, + __entry->acks_hard_ack, __entry->prev_pkt, - __entry->call_ackr_prev) + __entry->acks_prev_seq) ); TRACE_EVENT(rxrpc_req_ack, @@ -1947,6 +2218,33 @@ TRACE_EVENT(rxrpc_txbuf, __entry->ref) ); +TRACE_EVENT(rxrpc_tq, + TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq, + rxrpc_seq_t seq, enum rxrpc_tq_trace trace), + + TP_ARGS(call, tq, seq, trace), + + TP_STRUCT__entry( + __field(unsigned int, call_debug_id) + __field(rxrpc_seq_t, qbase) + __field(rxrpc_seq_t, seq) + __field(enum rxrpc_tq_trace, trace) + 
), + + TP_fast_assign( + __entry->call_debug_id = call->debug_id; + __entry->qbase = tq ? tq->qbase : call->tx_qbase; + __entry->seq = seq; + __entry->trace = trace; + ), + + TP_printk("c=%08x bq=%08x q=%08x %s", + __entry->call_debug_id, + __entry->qbase, + __entry->seq, + __print_symbolic(__entry->trace, rxrpc_tq_traces)) + ); + TRACE_EVENT(rxrpc_poke_call, TP_PROTO(struct rxrpc_call *call, bool busy, enum rxrpc_call_poke_trace what), @@ -2015,6 +2313,360 @@ TRACE_EVENT(rxrpc_sack, __entry->sack) ); +TRACE_EVENT(rxrpc_pmtud_tx, + TP_PROTO(struct rxrpc_call *call), + + TP_ARGS(call), + + TP_STRUCT__entry( + __field(unsigned int, peer_debug_id) + __field(unsigned int, call_debug_id) + __field(rxrpc_serial_t, ping_serial) + __field(unsigned short, pmtud_trial) + __field(unsigned short, pmtud_good) + __field(unsigned short, pmtud_bad) + ), + + TP_fast_assign( + __entry->peer_debug_id = call->peer->debug_id; + __entry->call_debug_id = call->debug_id; + __entry->ping_serial = call->conn->pmtud_probe; + __entry->pmtud_trial = call->peer->pmtud_trial; + __entry->pmtud_good = call->peer->pmtud_good; + __entry->pmtud_bad = call->peer->pmtud_bad; + ), + + TP_printk("P=%08x c=%08x pr=%08x %u-%u-%u", + __entry->peer_debug_id, + __entry->call_debug_id, + __entry->ping_serial, + __entry->pmtud_good, + __entry->pmtud_trial, + __entry->pmtud_bad) + ); + +TRACE_EVENT(rxrpc_pmtud_rx, + TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t resp_serial), + + TP_ARGS(conn, resp_serial), + + TP_STRUCT__entry( + __field(unsigned int, peer_debug_id) + __field(unsigned int, call_debug_id) + __field(rxrpc_serial_t, ping_serial) + __field(rxrpc_serial_t, resp_serial) + __field(unsigned short, max_data) + __field(u8, jumbo_max) + ), + + TP_fast_assign( + __entry->peer_debug_id = conn->peer->debug_id; + __entry->call_debug_id = conn->pmtud_call; + __entry->ping_serial = conn->pmtud_probe; + __entry->resp_serial = resp_serial; + __entry->max_data = conn->peer->max_data; + __entry->jumbo_max = conn->peer->pmtud_jumbo; + ), + + TP_printk("P=%08x c=%08x pr=%08x rr=%08x max=%u jm=%u", + __entry->peer_debug_id, + __entry->call_debug_id, + __entry->ping_serial, + __entry->resp_serial, + __entry->max_data, + __entry->jumbo_max) + ); + +TRACE_EVENT(rxrpc_pmtud_lost, + TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t resp_serial), + + TP_ARGS(conn, resp_serial), + + TP_STRUCT__entry( + __field(unsigned int, peer_debug_id) + __field(unsigned int, call_debug_id) + __field(rxrpc_serial_t, ping_serial) + __field(rxrpc_serial_t, resp_serial) + ), + + TP_fast_assign( + __entry->peer_debug_id = conn->peer->debug_id; + __entry->call_debug_id = conn->pmtud_call; + __entry->ping_serial = conn->pmtud_probe; + __entry->resp_serial = resp_serial; + ), + + TP_printk("P=%08x c=%08x pr=%08x rr=%08x", + __entry->peer_debug_id, + __entry->call_debug_id, + __entry->ping_serial, + __entry->resp_serial) + ); + +TRACE_EVENT(rxrpc_pmtud_reduce, + TP_PROTO(struct rxrpc_peer *peer, rxrpc_serial_t serial, + unsigned int max_data, enum rxrpc_pmtud_reduce_trace reason), + + TP_ARGS(peer, serial, max_data, reason), + + TP_STRUCT__entry( + __field(unsigned int, peer_debug_id) + __field(rxrpc_serial_t, serial) + __field(unsigned int, max_data) + __field(enum rxrpc_pmtud_reduce_trace, reason) + ), + + TP_fast_assign( + __entry->peer_debug_id = peer->debug_id; + __entry->serial = serial; + __entry->max_data = max_data; + __entry->reason = reason; + ), + + TP_printk("P=%08x %s r=%08x m=%u", + __entry->peer_debug_id, + __print_symbolic(__entry->reason, 
rxrpc_pmtud_reduce_traces), + __entry->serial, __entry->max_data) + ); + +TRACE_EVENT(rxrpc_rack, + TP_PROTO(struct rxrpc_call *call, ktime_t timo), + + TP_ARGS(call, timo), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(rxrpc_serial_t, ack_serial) + __field(rxrpc_seq_t, seq) + __field(enum rxrpc_rack_timer_mode, mode) + __field(unsigned short, nr_sent) + __field(unsigned short, nr_lost) + __field(unsigned short, nr_resent) + __field(unsigned short, nr_sacked) + __field(ktime_t, timo) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->ack_serial = call->rx_serial; + __entry->seq = call->rack_end_seq; + __entry->mode = call->rack_timer_mode; + __entry->nr_sent = call->tx_nr_sent; + __entry->nr_lost = call->tx_nr_lost; + __entry->nr_resent = call->tx_nr_resent; + __entry->nr_sacked = call->acks_nr_sacks; + __entry->timo = timo; + ), + + TP_printk("c=%08x r=%08x q=%08x %s slrs=%u,%u,%u,%u t=%lld", + __entry->call, __entry->ack_serial, __entry->seq, + __print_symbolic(__entry->mode, rxrpc_rack_timer_modes), + __entry->nr_sent, __entry->nr_lost, + __entry->nr_resent, __entry->nr_sacked, + ktime_to_us(__entry->timo)) + ); + +TRACE_EVENT(rxrpc_rack_update, + TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary), + + TP_ARGS(call, summary), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(rxrpc_serial_t, ack_serial) + __field(rxrpc_seq_t, seq) + __field(int, xmit_ts) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->ack_serial = call->rx_serial; + __entry->seq = call->rack_end_seq; + __entry->xmit_ts = ktime_sub(call->acks_latest_ts, call->rack_xmit_ts); + ), + + TP_printk("c=%08x r=%08x q=%08x xt=%lld", + __entry->call, __entry->ack_serial, __entry->seq, + ktime_to_us(__entry->xmit_ts)) + ); + +TRACE_EVENT(rxrpc_rack_scan_loss, + TP_PROTO(struct rxrpc_call *call), + + TP_ARGS(call), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(ktime_t, rack_rtt) + __field(ktime_t, rack_reo_wnd) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->rack_rtt = call->rack_rtt; + __entry->rack_reo_wnd = call->rack_reo_wnd; + ), + + TP_printk("c=%08x rtt=%lld reow=%lld", + __entry->call, ktime_to_us(__entry->rack_rtt), + ktime_to_us(__entry->rack_reo_wnd)) + ); + +TRACE_EVENT(rxrpc_rack_scan_loss_tq, + TP_PROTO(struct rxrpc_call *call, const struct rxrpc_txqueue *tq, + unsigned long nacks), + + TP_ARGS(call, tq, nacks), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(rxrpc_seq_t, qbase) + __field(unsigned long, nacks) + __field(unsigned long, lost) + __field(unsigned long, retrans) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->qbase = tq->qbase; + __entry->nacks = nacks; + __entry->lost = tq->segment_lost; + __entry->retrans = tq->segment_retransmitted; + ), + + TP_printk("c=%08x q=%08x n=%lx l=%lx r=%lx", + __entry->call, __entry->qbase, + __entry->nacks, __entry->lost, __entry->retrans) + ); + +TRACE_EVENT(rxrpc_rack_detect_loss, + TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary, + rxrpc_seq_t seq), + + TP_ARGS(call, summary, seq), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(rxrpc_serial_t, ack_serial) + __field(rxrpc_seq_t, seq) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->ack_serial = call->rx_serial; + __entry->seq = seq; + ), + + TP_printk("c=%08x r=%08x q=%08x", + __entry->call, __entry->ack_serial, __entry->seq) + ); + +TRACE_EVENT(rxrpc_rack_mark_loss_tq, + TP_PROTO(struct rxrpc_call *call, const 
struct rxrpc_txqueue *tq), + + TP_ARGS(call, tq), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(rxrpc_seq_t, qbase) + __field(rxrpc_seq_t, trans) + __field(unsigned long, acked) + __field(unsigned long, lost) + __field(unsigned long, retrans) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->qbase = tq->qbase; + __entry->trans = call->tx_transmitted; + __entry->acked = tq->segment_acked; + __entry->lost = tq->segment_lost; + __entry->retrans = tq->segment_retransmitted; + ), + + TP_printk("c=%08x tq=%08x txq=%08x a=%lx l=%lx r=%lx", + __entry->call, __entry->qbase, __entry->trans, + __entry->acked, __entry->lost, __entry->retrans) + ); + +TRACE_EVENT(rxrpc_tlp_probe, + TP_PROTO(struct rxrpc_call *call, enum rxrpc_tlp_probe_trace trace), + + TP_ARGS(call, trace), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(rxrpc_serial_t, serial) + __field(rxrpc_seq_t, seq) + __field(enum rxrpc_tlp_probe_trace, trace) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->serial = call->tlp_serial; + __entry->seq = call->tlp_seq; + __entry->trace = trace; + ), + + TP_printk("c=%08x r=%08x pq=%08x %s", + __entry->call, __entry->serial, __entry->seq, + __print_symbolic(__entry->trace, rxrpc_tlp_probe_traces)) + ); + +TRACE_EVENT(rxrpc_tlp_ack, + TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary, + enum rxrpc_tlp_ack_trace trace), + + TP_ARGS(call, summary, trace), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(rxrpc_serial_t, serial) + __field(rxrpc_seq_t, tlp_seq) + __field(rxrpc_seq_t, hard_ack) + __field(enum rxrpc_tlp_ack_trace, trace) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->serial = call->tlp_serial; + __entry->tlp_seq = call->tlp_seq; + __entry->hard_ack = call->acks_hard_ack; + __entry->trace = trace; + ), + + TP_printk("c=%08x r=%08x pq=%08x hq=%08x %s", + __entry->call, __entry->serial, + __entry->tlp_seq, __entry->hard_ack, + __print_symbolic(__entry->trace, rxrpc_tlp_ack_traces)) + ); + +TRACE_EVENT(rxrpc_rack_timer, + TP_PROTO(struct rxrpc_call *call, ktime_t delay, bool exp), + + TP_ARGS(call, delay, exp), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(bool, exp) + __field(enum rxrpc_rack_timer_mode, mode) + __field(ktime_t, delay) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->exp = exp; + __entry->mode = call->rack_timer_mode; + __entry->delay = delay; + ), + + TP_printk("c=%08x %s %s to=%lld", + __entry->call, + __entry->exp ? 
"Exp" : "Set", + __print_symbolic(__entry->mode, rxrpc_rack_timer_modes), + ktime_to_us(__entry->delay)) + ); + #undef EM #undef E_ diff --git a/include/trace/events/task.h b/include/trace/events/task.h index 47b527464d1a..af535b053033 100644 --- a/include/trace/events/task.h +++ b/include/trace/events/task.h @@ -38,22 +38,56 @@ TRACE_EVENT(task_rename, TP_ARGS(task, comm), TP_STRUCT__entry( - __field( pid_t, pid) __array( char, oldcomm, TASK_COMM_LEN) __array( char, newcomm, TASK_COMM_LEN) __field( short, oom_score_adj) ), TP_fast_assign( - __entry->pid = task->pid; memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN); strscpy(entry->newcomm, comm, TASK_COMM_LEN); __entry->oom_score_adj = task->signal->oom_score_adj; ), - TP_printk("pid=%d oldcomm=%s newcomm=%s oom_score_adj=%hd", - __entry->pid, __entry->oldcomm, - __entry->newcomm, __entry->oom_score_adj) + TP_printk("oldcomm=%s newcomm=%s oom_score_adj=%hd", + __entry->oldcomm, __entry->newcomm, __entry->oom_score_adj) +); + +/** + * task_prctl_unknown - called on unknown prctl() option + * @option: option passed + * @arg2: arg2 passed + * @arg3: arg3 passed + * @arg4: arg4 passed + * @arg5: arg5 passed + * + * Called on an unknown prctl() option. + */ +TRACE_EVENT(task_prctl_unknown, + + TP_PROTO(int option, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5), + + TP_ARGS(option, arg2, arg3, arg4, arg5), + + TP_STRUCT__entry( + __field( int, option) + __field( unsigned long, arg2) + __field( unsigned long, arg3) + __field( unsigned long, arg4) + __field( unsigned long, arg5) + ), + + TP_fast_assign( + __entry->option = option; + __entry->arg2 = arg2; + __entry->arg3 = arg3; + __entry->arg4 = arg4; + __entry->arg5 = arg5; + ), + + TP_printk("option=%d arg2=%ld arg3=%ld arg4=%ld arg5=%ld", + __entry->option, __entry->arg2, __entry->arg3, __entry->arg4, __entry->arg5) ); #endif diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h index 80f37a0d40d7..613475285643 100644 --- a/include/uapi/asm-generic/fcntl.h +++ b/include/uapi/asm-generic/fcntl.h @@ -6,7 +6,6 @@ /* * FMODE_EXEC is 0x20 - * FMODE_NONOTIFY is 0x4000000 * These cannot be used by userspace O_* until internal and external open * flags are split. * -Eric Paris diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index deacfd6dd197..aa5016ff3d91 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -143,6 +143,8 @@ #define SCM_TS_OPT_ID 81 +#define SO_RCVPRIORITY 82 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) diff --git a/include/uapi/drm/amdxdna_accel.h b/include/uapi/drm/amdxdna_accel.h new file mode 100644 index 000000000000..a706ead39082 --- /dev/null +++ b/include/uapi/drm/amdxdna_accel.h @@ -0,0 +1,501 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2022-2024, Advanced Micro Devices, Inc. 
+ */
+
+#ifndef _UAPI_AMDXDNA_ACCEL_H_
+#define _UAPI_AMDXDNA_ACCEL_H_
+
+#include <linux/stddef.h>
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define AMDXDNA_INVALID_CMD_HANDLE (~0UL)
+#define AMDXDNA_INVALID_ADDR (~0UL)
+#define AMDXDNA_INVALID_CTX_HANDLE 0
+#define AMDXDNA_INVALID_BO_HANDLE 0
+#define AMDXDNA_INVALID_FENCE_HANDLE 0
+
+enum amdxdna_device_type {
+ AMDXDNA_DEV_TYPE_UNKNOWN = -1,
+ AMDXDNA_DEV_TYPE_KMQ,
+};
+
+enum amdxdna_drm_ioctl_id {
+ DRM_AMDXDNA_CREATE_HWCTX,
+ DRM_AMDXDNA_DESTROY_HWCTX,
+ DRM_AMDXDNA_CONFIG_HWCTX,
+ DRM_AMDXDNA_CREATE_BO,
+ DRM_AMDXDNA_GET_BO_INFO,
+ DRM_AMDXDNA_SYNC_BO,
+ DRM_AMDXDNA_EXEC_CMD,
+ DRM_AMDXDNA_GET_INFO,
+ DRM_AMDXDNA_SET_STATE,
+};
+
+/**
+ * struct qos_info - QoS information for driver.
+ * @gops: Giga operations per second.
+ * @fps: Frames per second.
+ * @dma_bandwidth: DMA bandwidth.
+ * @latency: Frame response latency.
+ * @frame_exec_time: Frame execution time.
+ * @priority: Request priority.
+ *
+ * User program can provide QoS hints to driver.
+ */
+struct amdxdna_qos_info {
+ __u32 gops;
+ __u32 fps;
+ __u32 dma_bandwidth;
+ __u32 latency;
+ __u32 frame_exec_time;
+ __u32 priority;
+};
+
+/**
+ * struct amdxdna_drm_create_hwctx - Create hardware context.
+ * @ext: MBZ.
+ * @ext_flags: MBZ.
+ * @qos_p: Address of QoS info.
+ * @umq_bo: BO handle for user mode queue (UMQ).
+ * @log_buf_bo: BO handle for log buffer.
+ * @max_opc: Maximum operations per cycle.
+ * @num_tiles: Number of AIE tiles.
+ * @mem_size: Size of AIE tile memory.
+ * @umq_doorbell: Returned offset of doorbell associated with UMQ.
+ * @handle: Returned hardware context handle.
+ * @syncobj_handle: Returned syncobj handle for command completion.
+ */
+struct amdxdna_drm_create_hwctx {
+ __u64 ext;
+ __u64 ext_flags;
+ __u64 qos_p;
+ __u32 umq_bo;
+ __u32 log_buf_bo;
+ __u32 max_opc;
+ __u32 num_tiles;
+ __u32 mem_size;
+ __u32 umq_doorbell;
+ __u32 handle;
+ __u32 syncobj_handle;
+};
+
+/**
+ * struct amdxdna_drm_destroy_hwctx - Destroy hardware context.
+ * @handle: Hardware context handle.
+ * @pad: MBZ.
+ */
+struct amdxdna_drm_destroy_hwctx {
+ __u32 handle;
+ __u32 pad;
+};
+
+/**
+ * struct amdxdna_cu_config - configuration for one CU
+ * @cu_bo: CU configuration buffer bo handle.
+ * @cu_func: Function of a CU.
+ * @pad: MBZ.
+ */
+struct amdxdna_cu_config {
+ __u32 cu_bo;
+ __u8 cu_func;
+ __u8 pad[3];
+};
+
+/**
+ * struct amdxdna_hwctx_param_config_cu - configuration for CUs in hardware context
+ * @num_cus: Number of CUs to configure.
+ * @pad: MBZ.
+ * @cu_configs: Array of CU configurations of struct amdxdna_cu_config.
+ */
+struct amdxdna_hwctx_param_config_cu {
+ __u16 num_cus;
+ __u16 pad[3];
+ struct amdxdna_cu_config cu_configs[] __counted_by(num_cus);
+};
+
+enum amdxdna_drm_config_hwctx_param {
+ DRM_AMDXDNA_HWCTX_CONFIG_CU,
+ DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF,
+ DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF,
+};
+
+/**
+ * struct amdxdna_drm_config_hwctx - Configure hardware context.
+ * @handle: Hardware context handle.
+ * @param_type: Value in enum amdxdna_drm_config_hwctx_param. Specifies the
+ * structure passed in via param_val.
+ * @param_val: A structure specified by the param_type struct member.
+ * @param_val_size: Size of the parameter buffer pointed to by the param_val.
+ * If param_val is not a pointer, driver can ignore this.
+ * @pad: MBZ.
+ *
+ * Note: if the param_val is a pointer pointing to a buffer, the maximum size
+ * of the buffer is 4KiB (PAGE_SIZE).
+ */ +struct amdxdna_drm_config_hwctx { + __u32 handle; + __u32 param_type; + __u64 param_val; + __u32 param_val_size; + __u32 pad; +}; + +enum amdxdna_bo_type { + AMDXDNA_BO_INVALID = 0, + AMDXDNA_BO_SHMEM, + AMDXDNA_BO_DEV_HEAP, + AMDXDNA_BO_DEV, + AMDXDNA_BO_CMD, +}; + +/** + * struct amdxdna_drm_create_bo - Create a buffer object. + * @flags: Buffer flags. MBZ. + * @vaddr: User VA of buffer if applied. MBZ. + * @size: Size in bytes. + * @type: Buffer type. + * @handle: Returned DRM buffer object handle. + */ +struct amdxdna_drm_create_bo { + __u64 flags; + __u64 vaddr; + __u64 size; + __u32 type; + __u32 handle; +}; + +/** + * struct amdxdna_drm_get_bo_info - Get buffer object information. + * @ext: MBZ. + * @ext_flags: MBZ. + * @handle: DRM buffer object handle. + * @pad: MBZ. + * @map_offset: Returned DRM fake offset for mmap(). + * @vaddr: Returned user VA of buffer. 0 in case user needs mmap(). + * @xdna_addr: Returned XDNA device virtual address. + */ +struct amdxdna_drm_get_bo_info { + __u64 ext; + __u64 ext_flags; + __u32 handle; + __u32 pad; + __u64 map_offset; + __u64 vaddr; + __u64 xdna_addr; +}; + +/** + * struct amdxdna_drm_sync_bo - Sync buffer object. + * @handle: Buffer object handle. + * @direction: Direction of sync, can be from device or to device. + * @offset: Offset in the buffer to sync. + * @size: Size in bytes. + */ +struct amdxdna_drm_sync_bo { + __u32 handle; +#define SYNC_DIRECT_TO_DEVICE 0U +#define SYNC_DIRECT_FROM_DEVICE 1U + __u32 direction; + __u64 offset; + __u64 size; +}; + +enum amdxdna_cmd_type { + AMDXDNA_CMD_SUBMIT_EXEC_BUF = 0, + AMDXDNA_CMD_SUBMIT_DEPENDENCY, + AMDXDNA_CMD_SUBMIT_SIGNAL, +}; + +/** + * struct amdxdna_drm_exec_cmd - Execute command. + * @ext: MBZ. + * @ext_flags: MBZ. + * @hwctx: Hardware context handle. + * @type: One of command type in enum amdxdna_cmd_type. + * @cmd_handles: Array of command handles or the command handle itself + * in case of just one. + * @args: Array of arguments for all command handles. + * @cmd_count: Number of command handles in the cmd_handles array. + * @arg_count: Number of arguments in the args array. + * @seq: Returned sequence number for this command. + */ +struct amdxdna_drm_exec_cmd { + __u64 ext; + __u64 ext_flags; + __u32 hwctx; + __u32 type; + __u64 cmd_handles; + __u64 args; + __u32 cmd_count; + __u32 arg_count; + __u64 seq; +}; + +/** + * struct amdxdna_drm_query_aie_status - Query the status of the AIE hardware + * @buffer: The user space buffer that will return the AIE status. + * @buffer_size: The size of the user space buffer. + * @cols_filled: A bitmap of AIE columns whose data has been returned in the buffer. + */ +struct amdxdna_drm_query_aie_status { + __u64 buffer; /* out */ + __u32 buffer_size; /* in */ + __u32 cols_filled; /* out */ +}; + +/** + * struct amdxdna_drm_query_aie_version - Query the version of the AIE hardware + * @major: The major version number. + * @minor: The minor version number. + */ +struct amdxdna_drm_query_aie_version { + __u32 major; /* out */ + __u32 minor; /* out */ +}; + +/** + * struct amdxdna_drm_query_aie_tile_metadata - Query the metadata of AIE tile (core, mem, shim) + * @row_count: The number of rows. + * @row_start: The starting row number. + * @dma_channel_count: The number of dma channels. + * @lock_count: The number of locks. + * @event_reg_count: The number of events. + * @pad: Structure padding. 
+ */ +struct amdxdna_drm_query_aie_tile_metadata { + __u16 row_count; + __u16 row_start; + __u16 dma_channel_count; + __u16 lock_count; + __u16 event_reg_count; + __u16 pad[3]; +}; + +/** + * struct amdxdna_drm_query_aie_metadata - Query the metadata of the AIE hardware + * @col_size: The size of a column in bytes. + * @cols: The total number of columns. + * @rows: The total number of rows. + * @version: The version of the AIE hardware. + * @core: The metadata for all core tiles. + * @mem: The metadata for all mem tiles. + * @shim: The metadata for all shim tiles. + */ +struct amdxdna_drm_query_aie_metadata { + __u32 col_size; + __u16 cols; + __u16 rows; + struct amdxdna_drm_query_aie_version version; + struct amdxdna_drm_query_aie_tile_metadata core; + struct amdxdna_drm_query_aie_tile_metadata mem; + struct amdxdna_drm_query_aie_tile_metadata shim; +}; + +/** + * struct amdxdna_drm_query_clock - Metadata for a clock + * @name: The clock name. + * @freq_mhz: The clock frequency. + * @pad: Structure padding. + */ +struct amdxdna_drm_query_clock { + __u8 name[16]; + __u32 freq_mhz; + __u32 pad; +}; + +/** + * struct amdxdna_drm_query_clock_metadata - Query metadata for clocks + * @mp_npu_clock: The metadata for MP-NPU clock. + * @h_clock: The metadata for H clock. + */ +struct amdxdna_drm_query_clock_metadata { + struct amdxdna_drm_query_clock mp_npu_clock; + struct amdxdna_drm_query_clock h_clock; +}; + +enum amdxdna_sensor_type { + AMDXDNA_SENSOR_TYPE_POWER +}; + +/** + * struct amdxdna_drm_query_sensor - The data for single sensor. + * @label: The name for a sensor. + * @input: The current value of the sensor. + * @max: The maximum value possible for the sensor. + * @average: The average value of the sensor. + * @highest: The highest recorded sensor value for this driver load for the sensor. + * @status: The sensor status. + * @units: The sensor units. + * @unitm: Translates value member variables into the correct unit via (pow(10, unitm) * value). + * @type: The sensor type from enum amdxdna_sensor_type. + * @pad: Structure padding. + */ +struct amdxdna_drm_query_sensor { + __u8 label[64]; + __u32 input; + __u32 max; + __u32 average; + __u32 highest; + __u8 status[64]; + __u8 units[16]; + __s8 unitm; + __u8 type; + __u8 pad[6]; +}; + +/** + * struct amdxdna_drm_query_hwctx - The data for single context. + * @context_id: The ID for this context. + * @start_col: The starting column for the partition assigned to this context. + * @num_col: The number of columns in the partition assigned to this context. + * @pad: Structure padding. + * @pid: The Process ID of the process that created this context. + * @command_submissions: The number of commands submitted to this context. + * @command_completions: The number of commands completed by this context. + * @migrations: The number of times this context has been moved to a different partition. + * @preemptions: The number of times this context has been preempted by another context in the + * same partition. + * @errors: The errors for this context. 
+ */
+struct amdxdna_drm_query_hwctx {
+ __u32 context_id;
+ __u32 start_col;
+ __u32 num_col;
+ __u32 pad;
+ __s64 pid;
+ __u64 command_submissions;
+ __u64 command_completions;
+ __u64 migrations;
+ __u64 preemptions;
+ __u64 errors;
+};
+
+enum amdxdna_power_mode_type {
+ POWER_MODE_DEFAULT, /* Fallback to calculated DPM */
+ POWER_MODE_LOW, /* Set frequency to lowest DPM */
+ POWER_MODE_MEDIUM, /* Set frequency to medium DPM */
+ POWER_MODE_HIGH, /* Set frequency to highest DPM */
+ POWER_MODE_TURBO, /* Maximum power */
+};
+
+/**
+ * struct amdxdna_drm_get_power_mode - Get the configured power mode
+ * @power_mode: The mode type from enum amdxdna_power_mode_type
+ * @pad: Structure padding.
+ */
+struct amdxdna_drm_get_power_mode {
+ __u8 power_mode;
+ __u8 pad[7];
+};
+
+/**
+ * struct amdxdna_drm_query_firmware_version - Query the firmware version
+ * @major: The major version number
+ * @minor: The minor version number
+ * @patch: The patch level version number
+ * @build: The build ID
+ */
+struct amdxdna_drm_query_firmware_version {
+ __u32 major; /* out */
+ __u32 minor; /* out */
+ __u32 patch; /* out */
+ __u32 build; /* out */
+};
+
+enum amdxdna_drm_get_param {
+ DRM_AMDXDNA_QUERY_AIE_STATUS,
+ DRM_AMDXDNA_QUERY_AIE_METADATA,
+ DRM_AMDXDNA_QUERY_AIE_VERSION,
+ DRM_AMDXDNA_QUERY_CLOCK_METADATA,
+ DRM_AMDXDNA_QUERY_SENSORS,
+ DRM_AMDXDNA_QUERY_HW_CONTEXTS,
+ DRM_AMDXDNA_QUERY_FIRMWARE_VERSION = 8,
+ DRM_AMDXDNA_GET_POWER_MODE,
+};
+
+/**
+ * struct amdxdna_drm_get_info - Get some information from the AIE hardware.
+ * @param: Value in enum amdxdna_drm_get_param. Specifies the structure passed in the buffer.
+ * @buffer_size: Size of the input buffer. Size needed/written by the kernel.
+ * @buffer: A structure specified by the param struct member.
+ */
+struct amdxdna_drm_get_info {
+ __u32 param; /* in */
+ __u32 buffer_size; /* in/out */
+ __u64 buffer; /* in/out */
+};
+
+enum amdxdna_drm_set_param {
+ DRM_AMDXDNA_SET_POWER_MODE,
+ DRM_AMDXDNA_WRITE_AIE_MEM,
+ DRM_AMDXDNA_WRITE_AIE_REG,
+};
+
+/**
+ * struct amdxdna_drm_set_state - Set the state of the AIE hardware.
+ * @param: Value in enum amdxdna_drm_set_param.
+ * @buffer_size: Size of the input param.
+ * @buffer: Pointer to the input param.
+ */
+struct amdxdna_drm_set_state {
+ __u32 param; /* in */
+ __u32 buffer_size; /* in */
+ __u64 buffer; /* in */
+};
+
+/**
+ * struct amdxdna_drm_set_power_mode - Set the power mode of the AIE hardware
+ * @power_mode: The power mode from enum amdxdna_power_mode_type
+ * @pad: MBZ.
+ */ +struct amdxdna_drm_set_power_mode { + __u8 power_mode; + __u8 pad[7]; +}; + +#define DRM_IOCTL_AMDXDNA_CREATE_HWCTX \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_CREATE_HWCTX, \ + struct amdxdna_drm_create_hwctx) + +#define DRM_IOCTL_AMDXDNA_DESTROY_HWCTX \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_DESTROY_HWCTX, \ + struct amdxdna_drm_destroy_hwctx) + +#define DRM_IOCTL_AMDXDNA_CONFIG_HWCTX \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_CONFIG_HWCTX, \ + struct amdxdna_drm_config_hwctx) + +#define DRM_IOCTL_AMDXDNA_CREATE_BO \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_CREATE_BO, \ + struct amdxdna_drm_create_bo) + +#define DRM_IOCTL_AMDXDNA_GET_BO_INFO \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_GET_BO_INFO, \ + struct amdxdna_drm_get_bo_info) + +#define DRM_IOCTL_AMDXDNA_SYNC_BO \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_SYNC_BO, \ + struct amdxdna_drm_sync_bo) + +#define DRM_IOCTL_AMDXDNA_EXEC_CMD \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_EXEC_CMD, \ + struct amdxdna_drm_exec_cmd) + +#define DRM_IOCTL_AMDXDNA_GET_INFO \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_GET_INFO, \ + struct amdxdna_drm_get_info) + +#define DRM_IOCTL_AMDXDNA_SET_STATE \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_SET_STATE, \ + struct amdxdna_drm_set_state) + +#if defined(__cplusplus) +} /* extern c end */ +#endif + +#endif /* _UAPI_AMDXDNA_ACCEL_H_ */ diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index b916aab80dde..2342cb90857e 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -90,6 +90,7 @@ struct drm_msm_timespec { #define MSM_PARAM_RAYTRACING 0x11 /* RO */ #define MSM_PARAM_UBWC_SWIZZLE 0x12 /* RO */ #define MSM_PARAM_MACROTILE_MODE 0x13 /* RO */ +#define MSM_PARAM_UCHE_TRAP_BASE 0x14 /* RO */ /* For backwards compat. The original support for preemption was based on * a single ring per priority level so # of priority levels equals the # diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h index 87c9cb555dd1..b99763cbae48 100644 --- a/include/uapi/drm/panthor_drm.h +++ b/include/uapi/drm/panthor_drm.h @@ -923,6 +923,15 @@ enum drm_panthor_group_state_flags { * When a group ends up with this flag set, no jobs can be submitted to its queues. */ DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1, + + /** + * @DRM_PANTHOR_GROUP_STATE_INNOCENT: Group was killed during a reset caused by other + * groups. + * + * This flag can only be set if DRM_PANTHOR_GROUP_STATE_TIMEDOUT is set and + * DRM_PANTHOR_GROUP_STATE_FATAL_FAULT is not. + */ + DRM_PANTHOR_GROUP_STATE_INNOCENT = 1 << 2, }; /** diff --git a/include/uapi/drm/qaic_accel.h b/include/uapi/drm/qaic_accel.h index d3ca876a08e9..c92d0309d583 100644 --- a/include/uapi/drm/qaic_accel.h +++ b/include/uapi/drm/qaic_accel.h @@ -64,7 +64,7 @@ struct qaic_manage_trans_hdr { /** * struct qaic_manage_trans_passthrough - Defines a passthrough transaction. * @hdr: In. Header to identify this transaction. - * @data: In. Payload of this ransaction. Opaque to the driver. Userspace must + * @data: In. Payload of this transaction. Opaque to the driver. Userspace must * encode in little endian and align/pad to 64-bit. 
*/ struct qaic_manage_trans_passthrough { diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 2376c73abca1..dbbc404d2b3d 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -43,6 +43,7 @@ extern "C" { #define DRM_V3D_PERFMON_GET_VALUES 0x0a #define DRM_V3D_SUBMIT_CPU 0x0b #define DRM_V3D_PERFMON_GET_COUNTER 0x0c +#define DRM_V3D_PERFMON_SET_GLOBAL 0x0d #define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl) #define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo) @@ -61,6 +62,8 @@ extern "C" { #define DRM_IOCTL_V3D_SUBMIT_CPU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu) #define DRM_IOCTL_V3D_PERFMON_GET_COUNTER DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_COUNTER, \ struct drm_v3d_perfmon_get_counter) +#define DRM_IOCTL_V3D_PERFMON_SET_GLOBAL DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_PERFMON_SET_GLOBAL, \ + struct drm_v3d_perfmon_set_global) #define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01 #define DRM_V3D_SUBMIT_EXTENSION 0x02 @@ -766,6 +769,21 @@ struct drm_v3d_perfmon_get_counter { __u8 reserved[7]; }; +#define DRM_V3D_PERFMON_CLEAR_GLOBAL 0x0001 + +/** + * struct drm_v3d_perfmon_set_global - ioctl to define a global performance + * monitor + * + * The global performance monitor will be used for all jobs. If a global + * performance monitor is defined, jobs with a self-defined performance + * monitor won't be allowed. + */ +struct drm_v3d_perfmon_set_global { + __u32 flags; + __u32 id; +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h index 4a8a4a63e99c..f62689ca861a 100644 --- a/include/uapi/drm/xe_drm.h +++ b/include/uapi/drm/xe_drm.h @@ -1486,6 +1486,8 @@ struct drm_xe_oa_unit { __u64 capabilities; #define DRM_XE_OA_CAPS_BASE (1 << 0) #define DRM_XE_OA_CAPS_SYNCS (1 << 1) +#define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2) +#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3) /** @oa_timestamp_freq: OA timestamp freq */ __u64 oa_timestamp_freq; @@ -1651,6 +1653,20 @@ enum drm_xe_oa_property_id { * to the VM bind case. */ DRM_XE_OA_PROPERTY_SYNCS, + + /** + * @DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE: Size of OA buffer to be + * allocated by the driver in bytes. Supported sizes are powers of + * 2 from 128 KiB to 128 MiB. When not specified, a 16 MiB OA + * buffer is allocated by default. + */ + DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE, + + /** + * @DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS: Number of reports to wait + * for before unblocking poll or read + */ + DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS, }; /** diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index 75e21a135483..d9a069b4a775 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -161,6 +161,7 @@ #define AUDIT_INTEGRITY_RULE 1805 /* policy rule */ #define AUDIT_INTEGRITY_EVM_XATTR 1806 /* New EVM-covered xattr */ #define AUDIT_INTEGRITY_POLICY_RULE 1807 /* IMA policy rules */ +#define AUDIT_INTEGRITY_USERSPACE 1808 /* Userspace enforced data integrity */ #define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4162afc6b5d0..2acf9b336371 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1573,6 +1573,16 @@ union bpf_attr { * If provided, prog_flags should have BPF_F_TOKEN_FD flag set. */ __s32 prog_token_fd; + /* The fd_array_cnt can be used to pass the length of the + * fd_array array. 
In this case all the [map] file descriptors + * passed in this array will be bound to the program, even if + * the maps are not referenced directly. The functionality is + * similar to the BPF_PROG_BIND_MAP syscall, but maps can be + * used by the verifier during the program load. If provided, + * then the fd_array[0,...,fd_array_cnt-1] is expected to be + * continuous. + */ + __u32 fd_array_cnt; }; struct { /* anonymous struct used by BPF_OBJ_* commands */ diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index 1990b5700f69..b08c7378164d 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -286,9 +286,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 48 +#define DM_VERSION_MINOR 49 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2023-03-01)" +#define DM_VERSION_EXTRA "-ioctl (2025-01-17)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 7e1b3820f91f..d1089b88efc7 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -681,6 +681,7 @@ enum ethtool_link_ext_substate_module { * @ETH_SS_STATS_ETH_MAC: names of IEEE 802.3 MAC statistics * @ETH_SS_STATS_ETH_CTRL: names of IEEE 802.3 MAC Control statistics * @ETH_SS_STATS_RMON: names of RMON statistics + * @ETH_SS_STATS_PHY: names of PHY(dev) statistics * * @ETH_SS_COUNT: number of defined string sets */ @@ -706,6 +707,7 @@ enum ethtool_stringset { ETH_SS_STATS_ETH_MAC, ETH_SS_STATS_ETH_CTRL, ETH_SS_STATS_RMON, + ETH_SS_STATS_PHY, /* add new constants above here */ ETH_SS_COUNT diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index 283305f6b063..9ff72cfb2e98 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -10,545 +10,12 @@ #define _UAPI_LINUX_ETHTOOL_NETLINK_H_ #include <linux/ethtool.h> - -/* message types - userspace to kernel */ -enum { - ETHTOOL_MSG_USER_NONE, - ETHTOOL_MSG_STRSET_GET, - ETHTOOL_MSG_LINKINFO_GET, - ETHTOOL_MSG_LINKINFO_SET, - ETHTOOL_MSG_LINKMODES_GET, - ETHTOOL_MSG_LINKMODES_SET, - ETHTOOL_MSG_LINKSTATE_GET, - ETHTOOL_MSG_DEBUG_GET, - ETHTOOL_MSG_DEBUG_SET, - ETHTOOL_MSG_WOL_GET, - ETHTOOL_MSG_WOL_SET, - ETHTOOL_MSG_FEATURES_GET, - ETHTOOL_MSG_FEATURES_SET, - ETHTOOL_MSG_PRIVFLAGS_GET, - ETHTOOL_MSG_PRIVFLAGS_SET, - ETHTOOL_MSG_RINGS_GET, - ETHTOOL_MSG_RINGS_SET, - ETHTOOL_MSG_CHANNELS_GET, - ETHTOOL_MSG_CHANNELS_SET, - ETHTOOL_MSG_COALESCE_GET, - ETHTOOL_MSG_COALESCE_SET, - ETHTOOL_MSG_PAUSE_GET, - ETHTOOL_MSG_PAUSE_SET, - ETHTOOL_MSG_EEE_GET, - ETHTOOL_MSG_EEE_SET, - ETHTOOL_MSG_TSINFO_GET, - ETHTOOL_MSG_CABLE_TEST_ACT, - ETHTOOL_MSG_CABLE_TEST_TDR_ACT, - ETHTOOL_MSG_TUNNEL_INFO_GET, - ETHTOOL_MSG_FEC_GET, - ETHTOOL_MSG_FEC_SET, - ETHTOOL_MSG_MODULE_EEPROM_GET, - ETHTOOL_MSG_STATS_GET, - ETHTOOL_MSG_PHC_VCLOCKS_GET, - ETHTOOL_MSG_MODULE_GET, - ETHTOOL_MSG_MODULE_SET, - ETHTOOL_MSG_PSE_GET, - ETHTOOL_MSG_PSE_SET, - ETHTOOL_MSG_RSS_GET, - ETHTOOL_MSG_PLCA_GET_CFG, - ETHTOOL_MSG_PLCA_SET_CFG, - ETHTOOL_MSG_PLCA_GET_STATUS, - ETHTOOL_MSG_MM_GET, - ETHTOOL_MSG_MM_SET, - ETHTOOL_MSG_MODULE_FW_FLASH_ACT, - ETHTOOL_MSG_PHY_GET, - - /* add new constants above here */ - __ETHTOOL_MSG_USER_CNT, - ETHTOOL_MSG_USER_MAX = __ETHTOOL_MSG_USER_CNT - 1 -}; - -/* message types - kernel to userspace */ -enum { - ETHTOOL_MSG_KERNEL_NONE, - 
ETHTOOL_MSG_STRSET_GET_REPLY, - ETHTOOL_MSG_LINKINFO_GET_REPLY, - ETHTOOL_MSG_LINKINFO_NTF, - ETHTOOL_MSG_LINKMODES_GET_REPLY, - ETHTOOL_MSG_LINKMODES_NTF, - ETHTOOL_MSG_LINKSTATE_GET_REPLY, - ETHTOOL_MSG_DEBUG_GET_REPLY, - ETHTOOL_MSG_DEBUG_NTF, - ETHTOOL_MSG_WOL_GET_REPLY, - ETHTOOL_MSG_WOL_NTF, - ETHTOOL_MSG_FEATURES_GET_REPLY, - ETHTOOL_MSG_FEATURES_SET_REPLY, - ETHTOOL_MSG_FEATURES_NTF, - ETHTOOL_MSG_PRIVFLAGS_GET_REPLY, - ETHTOOL_MSG_PRIVFLAGS_NTF, - ETHTOOL_MSG_RINGS_GET_REPLY, - ETHTOOL_MSG_RINGS_NTF, - ETHTOOL_MSG_CHANNELS_GET_REPLY, - ETHTOOL_MSG_CHANNELS_NTF, - ETHTOOL_MSG_COALESCE_GET_REPLY, - ETHTOOL_MSG_COALESCE_NTF, - ETHTOOL_MSG_PAUSE_GET_REPLY, - ETHTOOL_MSG_PAUSE_NTF, - ETHTOOL_MSG_EEE_GET_REPLY, - ETHTOOL_MSG_EEE_NTF, - ETHTOOL_MSG_TSINFO_GET_REPLY, - ETHTOOL_MSG_CABLE_TEST_NTF, - ETHTOOL_MSG_CABLE_TEST_TDR_NTF, - ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY, - ETHTOOL_MSG_FEC_GET_REPLY, - ETHTOOL_MSG_FEC_NTF, - ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY, - ETHTOOL_MSG_STATS_GET_REPLY, - ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY, - ETHTOOL_MSG_MODULE_GET_REPLY, - ETHTOOL_MSG_MODULE_NTF, - ETHTOOL_MSG_PSE_GET_REPLY, - ETHTOOL_MSG_RSS_GET_REPLY, - ETHTOOL_MSG_PLCA_GET_CFG_REPLY, - ETHTOOL_MSG_PLCA_GET_STATUS_REPLY, - ETHTOOL_MSG_PLCA_NTF, - ETHTOOL_MSG_MM_GET_REPLY, - ETHTOOL_MSG_MM_NTF, - ETHTOOL_MSG_MODULE_FW_FLASH_NTF, - ETHTOOL_MSG_PHY_GET_REPLY, - ETHTOOL_MSG_PHY_NTF, - - /* add new constants above here */ - __ETHTOOL_MSG_KERNEL_CNT, - ETHTOOL_MSG_KERNEL_MAX = __ETHTOOL_MSG_KERNEL_CNT - 1 -}; - -/* request header */ - -enum ethtool_header_flags { - ETHTOOL_FLAG_COMPACT_BITSETS = 1 << 0, /* use compact bitsets in reply */ - ETHTOOL_FLAG_OMIT_REPLY = 1 << 1, /* provide optional reply for SET or ACT requests */ - ETHTOOL_FLAG_STATS = 1 << 2, /* request statistics, if supported by the driver */ -}; +#include <linux/ethtool_netlink_generated.h> #define ETHTOOL_FLAG_ALL (ETHTOOL_FLAG_COMPACT_BITSETS | \ ETHTOOL_FLAG_OMIT_REPLY | \ ETHTOOL_FLAG_STATS) -enum { - ETHTOOL_A_HEADER_UNSPEC, - ETHTOOL_A_HEADER_DEV_INDEX, /* u32 */ - ETHTOOL_A_HEADER_DEV_NAME, /* string */ - ETHTOOL_A_HEADER_FLAGS, /* u32 - ETHTOOL_FLAG_* */ - ETHTOOL_A_HEADER_PHY_INDEX, /* u32 */ - - /* add new constants above here */ - __ETHTOOL_A_HEADER_CNT, - ETHTOOL_A_HEADER_MAX = __ETHTOOL_A_HEADER_CNT - 1 -}; - -/* bit sets */ - -enum { - ETHTOOL_A_BITSET_BIT_UNSPEC, - ETHTOOL_A_BITSET_BIT_INDEX, /* u32 */ - ETHTOOL_A_BITSET_BIT_NAME, /* string */ - ETHTOOL_A_BITSET_BIT_VALUE, /* flag */ - - /* add new constants above here */ - __ETHTOOL_A_BITSET_BIT_CNT, - ETHTOOL_A_BITSET_BIT_MAX = __ETHTOOL_A_BITSET_BIT_CNT - 1 -}; - -enum { - ETHTOOL_A_BITSET_BITS_UNSPEC, - ETHTOOL_A_BITSET_BITS_BIT, /* nest - _A_BITSET_BIT_* */ - - /* add new constants above here */ - __ETHTOOL_A_BITSET_BITS_CNT, - ETHTOOL_A_BITSET_BITS_MAX = __ETHTOOL_A_BITSET_BITS_CNT - 1 -}; - -enum { - ETHTOOL_A_BITSET_UNSPEC, - ETHTOOL_A_BITSET_NOMASK, /* flag */ - ETHTOOL_A_BITSET_SIZE, /* u32 */ - ETHTOOL_A_BITSET_BITS, /* nest - _A_BITSET_BITS_* */ - ETHTOOL_A_BITSET_VALUE, /* binary */ - ETHTOOL_A_BITSET_MASK, /* binary */ - - /* add new constants above here */ - __ETHTOOL_A_BITSET_CNT, - ETHTOOL_A_BITSET_MAX = __ETHTOOL_A_BITSET_CNT - 1 -}; - -/* string sets */ - -enum { - ETHTOOL_A_STRING_UNSPEC, - ETHTOOL_A_STRING_INDEX, /* u32 */ - ETHTOOL_A_STRING_VALUE, /* string */ - - /* add new constants above here */ - __ETHTOOL_A_STRING_CNT, - ETHTOOL_A_STRING_MAX = __ETHTOOL_A_STRING_CNT - 1 -}; - -enum { - ETHTOOL_A_STRINGS_UNSPEC, - ETHTOOL_A_STRINGS_STRING, /* nest - 
_A_STRINGS_* */ - - /* add new constants above here */ - __ETHTOOL_A_STRINGS_CNT, - ETHTOOL_A_STRINGS_MAX = __ETHTOOL_A_STRINGS_CNT - 1 -}; - -enum { - ETHTOOL_A_STRINGSET_UNSPEC, - ETHTOOL_A_STRINGSET_ID, /* u32 */ - ETHTOOL_A_STRINGSET_COUNT, /* u32 */ - ETHTOOL_A_STRINGSET_STRINGS, /* nest - _A_STRINGS_* */ - - /* add new constants above here */ - __ETHTOOL_A_STRINGSET_CNT, - ETHTOOL_A_STRINGSET_MAX = __ETHTOOL_A_STRINGSET_CNT - 1 -}; - -enum { - ETHTOOL_A_STRINGSETS_UNSPEC, - ETHTOOL_A_STRINGSETS_STRINGSET, /* nest - _A_STRINGSET_* */ - - /* add new constants above here */ - __ETHTOOL_A_STRINGSETS_CNT, - ETHTOOL_A_STRINGSETS_MAX = __ETHTOOL_A_STRINGSETS_CNT - 1 -}; - -/* STRSET */ - -enum { - ETHTOOL_A_STRSET_UNSPEC, - ETHTOOL_A_STRSET_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_STRSET_STRINGSETS, /* nest - _A_STRINGSETS_* */ - ETHTOOL_A_STRSET_COUNTS_ONLY, /* flag */ - - /* add new constants above here */ - __ETHTOOL_A_STRSET_CNT, - ETHTOOL_A_STRSET_MAX = __ETHTOOL_A_STRSET_CNT - 1 -}; - -/* LINKINFO */ - -enum { - ETHTOOL_A_LINKINFO_UNSPEC, - ETHTOOL_A_LINKINFO_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_LINKINFO_PORT, /* u8 */ - ETHTOOL_A_LINKINFO_PHYADDR, /* u8 */ - ETHTOOL_A_LINKINFO_TP_MDIX, /* u8 */ - ETHTOOL_A_LINKINFO_TP_MDIX_CTRL, /* u8 */ - ETHTOOL_A_LINKINFO_TRANSCEIVER, /* u8 */ - - /* add new constants above here */ - __ETHTOOL_A_LINKINFO_CNT, - ETHTOOL_A_LINKINFO_MAX = __ETHTOOL_A_LINKINFO_CNT - 1 -}; - -/* LINKMODES */ - -enum { - ETHTOOL_A_LINKMODES_UNSPEC, - ETHTOOL_A_LINKMODES_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_LINKMODES_AUTONEG, /* u8 */ - ETHTOOL_A_LINKMODES_OURS, /* bitset */ - ETHTOOL_A_LINKMODES_PEER, /* bitset */ - ETHTOOL_A_LINKMODES_SPEED, /* u32 */ - ETHTOOL_A_LINKMODES_DUPLEX, /* u8 */ - ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG, /* u8 */ - ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE, /* u8 */ - ETHTOOL_A_LINKMODES_LANES, /* u32 */ - ETHTOOL_A_LINKMODES_RATE_MATCHING, /* u8 */ - - /* add new constants above here */ - __ETHTOOL_A_LINKMODES_CNT, - ETHTOOL_A_LINKMODES_MAX = __ETHTOOL_A_LINKMODES_CNT - 1 -}; - -/* LINKSTATE */ - -enum { - ETHTOOL_A_LINKSTATE_UNSPEC, - ETHTOOL_A_LINKSTATE_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_LINKSTATE_LINK, /* u8 */ - ETHTOOL_A_LINKSTATE_SQI, /* u32 */ - ETHTOOL_A_LINKSTATE_SQI_MAX, /* u32 */ - ETHTOOL_A_LINKSTATE_EXT_STATE, /* u8 */ - ETHTOOL_A_LINKSTATE_EXT_SUBSTATE, /* u8 */ - ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT, /* u32 */ - - /* add new constants above here */ - __ETHTOOL_A_LINKSTATE_CNT, - ETHTOOL_A_LINKSTATE_MAX = __ETHTOOL_A_LINKSTATE_CNT - 1 -}; - -/* DEBUG */ - -enum { - ETHTOOL_A_DEBUG_UNSPEC, - ETHTOOL_A_DEBUG_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_DEBUG_MSGMASK, /* bitset */ - - /* add new constants above here */ - __ETHTOOL_A_DEBUG_CNT, - ETHTOOL_A_DEBUG_MAX = __ETHTOOL_A_DEBUG_CNT - 1 -}; - -/* WOL */ - -enum { - ETHTOOL_A_WOL_UNSPEC, - ETHTOOL_A_WOL_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_WOL_MODES, /* bitset */ - ETHTOOL_A_WOL_SOPASS, /* binary */ - - /* add new constants above here */ - __ETHTOOL_A_WOL_CNT, - ETHTOOL_A_WOL_MAX = __ETHTOOL_A_WOL_CNT - 1 -}; - -/* FEATURES */ - -enum { - ETHTOOL_A_FEATURES_UNSPEC, - ETHTOOL_A_FEATURES_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_FEATURES_HW, /* bitset */ - ETHTOOL_A_FEATURES_WANTED, /* bitset */ - ETHTOOL_A_FEATURES_ACTIVE, /* bitset */ - ETHTOOL_A_FEATURES_NOCHANGE, /* bitset */ - - /* add new constants above here */ - __ETHTOOL_A_FEATURES_CNT, - ETHTOOL_A_FEATURES_MAX = __ETHTOOL_A_FEATURES_CNT - 1 -}; - -/* PRIVFLAGS */ - -enum { - 
ETHTOOL_A_PRIVFLAGS_UNSPEC, - ETHTOOL_A_PRIVFLAGS_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_PRIVFLAGS_FLAGS, /* bitset */ - - /* add new constants above here */ - __ETHTOOL_A_PRIVFLAGS_CNT, - ETHTOOL_A_PRIVFLAGS_MAX = __ETHTOOL_A_PRIVFLAGS_CNT - 1 -}; - -/* RINGS */ - -enum { - ETHTOOL_TCP_DATA_SPLIT_UNKNOWN = 0, - ETHTOOL_TCP_DATA_SPLIT_DISABLED, - ETHTOOL_TCP_DATA_SPLIT_ENABLED, -}; - -enum { - ETHTOOL_A_RINGS_UNSPEC, - ETHTOOL_A_RINGS_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_RINGS_RX_MAX, /* u32 */ - ETHTOOL_A_RINGS_RX_MINI_MAX, /* u32 */ - ETHTOOL_A_RINGS_RX_JUMBO_MAX, /* u32 */ - ETHTOOL_A_RINGS_TX_MAX, /* u32 */ - ETHTOOL_A_RINGS_RX, /* u32 */ - ETHTOOL_A_RINGS_RX_MINI, /* u32 */ - ETHTOOL_A_RINGS_RX_JUMBO, /* u32 */ - ETHTOOL_A_RINGS_TX, /* u32 */ - ETHTOOL_A_RINGS_RX_BUF_LEN, /* u32 */ - ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */ - ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */ - ETHTOOL_A_RINGS_TX_PUSH, /* u8 */ - ETHTOOL_A_RINGS_RX_PUSH, /* u8 */ - ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN, /* u32 */ - ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX, /* u32 */ - - /* add new constants above here */ - __ETHTOOL_A_RINGS_CNT, - ETHTOOL_A_RINGS_MAX = (__ETHTOOL_A_RINGS_CNT - 1) -}; - -/* CHANNELS */ - -enum { - ETHTOOL_A_CHANNELS_UNSPEC, - ETHTOOL_A_CHANNELS_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_CHANNELS_RX_MAX, /* u32 */ - ETHTOOL_A_CHANNELS_TX_MAX, /* u32 */ - ETHTOOL_A_CHANNELS_OTHER_MAX, /* u32 */ - ETHTOOL_A_CHANNELS_COMBINED_MAX, /* u32 */ - ETHTOOL_A_CHANNELS_RX_COUNT, /* u32 */ - ETHTOOL_A_CHANNELS_TX_COUNT, /* u32 */ - ETHTOOL_A_CHANNELS_OTHER_COUNT, /* u32 */ - ETHTOOL_A_CHANNELS_COMBINED_COUNT, /* u32 */ - - /* add new constants above here */ - __ETHTOOL_A_CHANNELS_CNT, - ETHTOOL_A_CHANNELS_MAX = (__ETHTOOL_A_CHANNELS_CNT - 1) -}; - -/* COALESCE */ - -enum { - ETHTOOL_A_COALESCE_UNSPEC, - ETHTOOL_A_COALESCE_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_COALESCE_RX_USECS, /* u32 */ - ETHTOOL_A_COALESCE_RX_MAX_FRAMES, /* u32 */ - ETHTOOL_A_COALESCE_RX_USECS_IRQ, /* u32 */ - ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ, /* u32 */ - ETHTOOL_A_COALESCE_TX_USECS, /* u32 */ - ETHTOOL_A_COALESCE_TX_MAX_FRAMES, /* u32 */ - ETHTOOL_A_COALESCE_TX_USECS_IRQ, /* u32 */ - ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ, /* u32 */ - ETHTOOL_A_COALESCE_STATS_BLOCK_USECS, /* u32 */ - ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX, /* u8 */ - ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX, /* u8 */ - ETHTOOL_A_COALESCE_PKT_RATE_LOW, /* u32 */ - ETHTOOL_A_COALESCE_RX_USECS_LOW, /* u32 */ - ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW, /* u32 */ - ETHTOOL_A_COALESCE_TX_USECS_LOW, /* u32 */ - ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW, /* u32 */ - ETHTOOL_A_COALESCE_PKT_RATE_HIGH, /* u32 */ - ETHTOOL_A_COALESCE_RX_USECS_HIGH, /* u32 */ - ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH, /* u32 */ - ETHTOOL_A_COALESCE_TX_USECS_HIGH, /* u32 */ - ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH, /* u32 */ - ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL, /* u32 */ - ETHTOOL_A_COALESCE_USE_CQE_MODE_TX, /* u8 */ - ETHTOOL_A_COALESCE_USE_CQE_MODE_RX, /* u8 */ - ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES, /* u32 */ - ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES, /* u32 */ - ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS, /* u32 */ - /* nest - _A_PROFILE_IRQ_MODERATION */ - ETHTOOL_A_COALESCE_RX_PROFILE, - /* nest - _A_PROFILE_IRQ_MODERATION */ - ETHTOOL_A_COALESCE_TX_PROFILE, - - /* add new constants above here */ - __ETHTOOL_A_COALESCE_CNT, - ETHTOOL_A_COALESCE_MAX = (__ETHTOOL_A_COALESCE_CNT - 1) -}; - -enum { - ETHTOOL_A_PROFILE_UNSPEC, - /* nest, _A_IRQ_MODERATION_* */ - ETHTOOL_A_PROFILE_IRQ_MODERATION, - 
__ETHTOOL_A_PROFILE_CNT, - ETHTOOL_A_PROFILE_MAX = (__ETHTOOL_A_PROFILE_CNT - 1) -}; - -enum { - ETHTOOL_A_IRQ_MODERATION_UNSPEC, - ETHTOOL_A_IRQ_MODERATION_USEC, /* u32 */ - ETHTOOL_A_IRQ_MODERATION_PKTS, /* u32 */ - ETHTOOL_A_IRQ_MODERATION_COMPS, /* u32 */ - - __ETHTOOL_A_IRQ_MODERATION_CNT, - ETHTOOL_A_IRQ_MODERATION_MAX = (__ETHTOOL_A_IRQ_MODERATION_CNT - 1) -}; - -/* PAUSE */ - -enum { - ETHTOOL_A_PAUSE_UNSPEC, - ETHTOOL_A_PAUSE_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_PAUSE_AUTONEG, /* u8 */ - ETHTOOL_A_PAUSE_RX, /* u8 */ - ETHTOOL_A_PAUSE_TX, /* u8 */ - ETHTOOL_A_PAUSE_STATS, /* nest - _PAUSE_STAT_* */ - ETHTOOL_A_PAUSE_STATS_SRC, /* u32 */ - - /* add new constants above here */ - __ETHTOOL_A_PAUSE_CNT, - ETHTOOL_A_PAUSE_MAX = (__ETHTOOL_A_PAUSE_CNT - 1) -}; - -enum { - ETHTOOL_A_PAUSE_STAT_UNSPEC, - ETHTOOL_A_PAUSE_STAT_PAD, - - ETHTOOL_A_PAUSE_STAT_TX_FRAMES, - ETHTOOL_A_PAUSE_STAT_RX_FRAMES, - - /* add new constants above here - * adjust ETHTOOL_PAUSE_STAT_CNT if adding non-stats! - */ - __ETHTOOL_A_PAUSE_STAT_CNT, - ETHTOOL_A_PAUSE_STAT_MAX = (__ETHTOOL_A_PAUSE_STAT_CNT - 1) -}; - -/* EEE */ - -enum { - ETHTOOL_A_EEE_UNSPEC, - ETHTOOL_A_EEE_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_EEE_MODES_OURS, /* bitset */ - ETHTOOL_A_EEE_MODES_PEER, /* bitset */ - ETHTOOL_A_EEE_ACTIVE, /* u8 */ - ETHTOOL_A_EEE_ENABLED, /* u8 */ - ETHTOOL_A_EEE_TX_LPI_ENABLED, /* u8 */ - ETHTOOL_A_EEE_TX_LPI_TIMER, /* u32 */ - - /* add new constants above here */ - __ETHTOOL_A_EEE_CNT, - ETHTOOL_A_EEE_MAX = (__ETHTOOL_A_EEE_CNT - 1) -}; - -/* TSINFO */ - -enum { - ETHTOOL_A_TSINFO_UNSPEC, - ETHTOOL_A_TSINFO_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_TSINFO_TIMESTAMPING, /* bitset */ - ETHTOOL_A_TSINFO_TX_TYPES, /* bitset */ - ETHTOOL_A_TSINFO_RX_FILTERS, /* bitset */ - ETHTOOL_A_TSINFO_PHC_INDEX, /* u32 */ - ETHTOOL_A_TSINFO_STATS, /* nest - _A_TSINFO_STAT */ - - /* add new constants above here */ - __ETHTOOL_A_TSINFO_CNT, - ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1) -}; - -enum { - ETHTOOL_A_TS_STAT_UNSPEC, - - ETHTOOL_A_TS_STAT_TX_PKTS, /* uint */ - ETHTOOL_A_TS_STAT_TX_LOST, /* uint */ - ETHTOOL_A_TS_STAT_TX_ERR, /* uint */ - - /* add new constants above here */ - __ETHTOOL_A_TS_STAT_CNT, - ETHTOOL_A_TS_STAT_MAX = (__ETHTOOL_A_TS_STAT_CNT - 1) - -}; - -/* PHC VCLOCKS */ - -enum { - ETHTOOL_A_PHC_VCLOCKS_UNSPEC, - ETHTOOL_A_PHC_VCLOCKS_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_PHC_VCLOCKS_NUM, /* u32 */ - ETHTOOL_A_PHC_VCLOCKS_INDEX, /* array, s32 */ - - /* add new constants above here */ - __ETHTOOL_A_PHC_VCLOCKS_CNT, - ETHTOOL_A_PHC_VCLOCKS_MAX = (__ETHTOOL_A_PHC_VCLOCKS_CNT - 1) -}; - -/* CABLE TEST */ - -enum { - ETHTOOL_A_CABLE_TEST_UNSPEC, - ETHTOOL_A_CABLE_TEST_HEADER, /* nest - _A_HEADER_* */ - - /* add new constants above here */ - __ETHTOOL_A_CABLE_TEST_CNT, - ETHTOOL_A_CABLE_TEST_MAX = __ETHTOOL_A_CABLE_TEST_CNT - 1 -}; - /* CABLE TEST NOTIFY */ enum { ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC, @@ -583,73 +50,11 @@ enum { }; enum { - ETHTOOL_A_CABLE_RESULT_UNSPEC, - ETHTOOL_A_CABLE_RESULT_PAIR, /* u8 ETHTOOL_A_CABLE_PAIR_ */ - ETHTOOL_A_CABLE_RESULT_CODE, /* u8 ETHTOOL_A_CABLE_RESULT_CODE_ */ - ETHTOOL_A_CABLE_RESULT_SRC, /* u32 ETHTOOL_A_CABLE_INF_SRC_ */ - - __ETHTOOL_A_CABLE_RESULT_CNT, - ETHTOOL_A_CABLE_RESULT_MAX = (__ETHTOOL_A_CABLE_RESULT_CNT - 1) -}; - -enum { - ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC, - ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR, /* u8 ETHTOOL_A_CABLE_PAIR_ */ - ETHTOOL_A_CABLE_FAULT_LENGTH_CM, /* u32 */ - ETHTOOL_A_CABLE_FAULT_LENGTH_SRC, /* u32 
ETHTOOL_A_CABLE_INF_SRC_ */ - - __ETHTOOL_A_CABLE_FAULT_LENGTH_CNT, - ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = (__ETHTOOL_A_CABLE_FAULT_LENGTH_CNT - 1) -}; - -enum { ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC, ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED, ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED }; -enum { - ETHTOOL_A_CABLE_NEST_UNSPEC, - ETHTOOL_A_CABLE_NEST_RESULT, /* nest - ETHTOOL_A_CABLE_RESULT_ */ - ETHTOOL_A_CABLE_NEST_FAULT_LENGTH, /* nest - ETHTOOL_A_CABLE_FAULT_LENGTH_ */ - __ETHTOOL_A_CABLE_NEST_CNT, - ETHTOOL_A_CABLE_NEST_MAX = (__ETHTOOL_A_CABLE_NEST_CNT - 1) -}; - -enum { - ETHTOOL_A_CABLE_TEST_NTF_UNSPEC, - ETHTOOL_A_CABLE_TEST_NTF_HEADER, /* nest - ETHTOOL_A_HEADER_* */ - ETHTOOL_A_CABLE_TEST_NTF_STATUS, /* u8 - _STARTED/_COMPLETE */ - ETHTOOL_A_CABLE_TEST_NTF_NEST, /* nest - of results: */ - - __ETHTOOL_A_CABLE_TEST_NTF_CNT, - ETHTOOL_A_CABLE_TEST_NTF_MAX = (__ETHTOOL_A_CABLE_TEST_NTF_CNT - 1) -}; - -/* CABLE TEST TDR */ - -enum { - ETHTOOL_A_CABLE_TEST_TDR_CFG_UNSPEC, - ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST, /* u32 */ - ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST, /* u32 */ - ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP, /* u32 */ - ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR, /* u8 */ - - /* add new constants above here */ - __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT, - ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX = __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT - 1 -}; - -enum { - ETHTOOL_A_CABLE_TEST_TDR_UNSPEC, - ETHTOOL_A_CABLE_TEST_TDR_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_CABLE_TEST_TDR_CFG, /* nest - *_TDR_CFG_* */ - - /* add new constants above here */ - __ETHTOOL_A_CABLE_TEST_TDR_CNT, - ETHTOOL_A_CABLE_TEST_TDR_MAX = __ETHTOOL_A_CABLE_TEST_TDR_CNT - 1 -}; - /* CABLE TEST TDR NOTIFY */ enum { @@ -690,163 +95,17 @@ enum { }; enum { - ETHTOOL_A_CABLE_TEST_TDR_NTF_UNSPEC, - ETHTOOL_A_CABLE_TEST_TDR_NTF_HEADER, /* nest - ETHTOOL_A_HEADER_* */ - ETHTOOL_A_CABLE_TEST_TDR_NTF_STATUS, /* u8 - _STARTED/_COMPLETE */ - ETHTOOL_A_CABLE_TEST_TDR_NTF_NEST, /* nest - of results: */ - - /* add new constants above here */ - __ETHTOOL_A_CABLE_TEST_TDR_NTF_CNT, - ETHTOOL_A_CABLE_TEST_TDR_NTF_MAX = __ETHTOOL_A_CABLE_TEST_TDR_NTF_CNT - 1 -}; - -/* TUNNEL INFO */ - -enum { - ETHTOOL_UDP_TUNNEL_TYPE_VXLAN, - ETHTOOL_UDP_TUNNEL_TYPE_GENEVE, - ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE, - - __ETHTOOL_UDP_TUNNEL_TYPE_CNT -}; - -enum { - ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC, - - ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT, /* be16 */ - ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE, /* u32 */ - - /* add new constants above here */ - __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT, - ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = (__ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT - 1) -}; - -enum { - ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC, - - ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, /* u32 */ - ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES, /* bitset */ - ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY, /* nest - _UDP_ENTRY_* */ - - /* add new constants above here */ - __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT, - ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = (__ETHTOOL_A_TUNNEL_UDP_TABLE_CNT - 1) -}; - -enum { - ETHTOOL_A_TUNNEL_UDP_UNSPEC, - - ETHTOOL_A_TUNNEL_UDP_TABLE, /* nest - _UDP_TABLE_* */ - - /* add new constants above here */ - __ETHTOOL_A_TUNNEL_UDP_CNT, - ETHTOOL_A_TUNNEL_UDP_MAX = (__ETHTOOL_A_TUNNEL_UDP_CNT - 1) -}; - -enum { - ETHTOOL_A_TUNNEL_INFO_UNSPEC, - ETHTOOL_A_TUNNEL_INFO_HEADER, /* nest - _A_HEADER_* */ - - ETHTOOL_A_TUNNEL_INFO_UDP_PORTS, /* nest - _UDP_TABLE */ - - /* add new constants above here */ - __ETHTOOL_A_TUNNEL_INFO_CNT, - ETHTOOL_A_TUNNEL_INFO_MAX = (__ETHTOOL_A_TUNNEL_INFO_CNT - 1) -}; - -/* FEC */ - -enum { - ETHTOOL_A_FEC_UNSPEC, - ETHTOOL_A_FEC_HEADER, /* nest - 
_A_HEADER_* */ - ETHTOOL_A_FEC_MODES, /* bitset */ - ETHTOOL_A_FEC_AUTO, /* u8 */ - ETHTOOL_A_FEC_ACTIVE, /* u32 */ - ETHTOOL_A_FEC_STATS, /* nest - _A_FEC_STAT */ - - __ETHTOOL_A_FEC_CNT, - ETHTOOL_A_FEC_MAX = (__ETHTOOL_A_FEC_CNT - 1) -}; - -enum { - ETHTOOL_A_FEC_STAT_UNSPEC, - ETHTOOL_A_FEC_STAT_PAD, - - ETHTOOL_A_FEC_STAT_CORRECTED, /* array, u64 */ - ETHTOOL_A_FEC_STAT_UNCORR, /* array, u64 */ - ETHTOOL_A_FEC_STAT_CORR_BITS, /* array, u64 */ - - /* add new constants above here */ - __ETHTOOL_A_FEC_STAT_CNT, - ETHTOOL_A_FEC_STAT_MAX = (__ETHTOOL_A_FEC_STAT_CNT - 1) -}; - -/* MODULE EEPROM */ - -enum { - ETHTOOL_A_MODULE_EEPROM_UNSPEC, - ETHTOOL_A_MODULE_EEPROM_HEADER, /* nest - _A_HEADER_* */ - - ETHTOOL_A_MODULE_EEPROM_OFFSET, /* u32 */ - ETHTOOL_A_MODULE_EEPROM_LENGTH, /* u32 */ - ETHTOOL_A_MODULE_EEPROM_PAGE, /* u8 */ - ETHTOOL_A_MODULE_EEPROM_BANK, /* u8 */ - ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS, /* u8 */ - ETHTOOL_A_MODULE_EEPROM_DATA, /* binary */ - - __ETHTOOL_A_MODULE_EEPROM_CNT, - ETHTOOL_A_MODULE_EEPROM_MAX = (__ETHTOOL_A_MODULE_EEPROM_CNT - 1) -}; - -/* STATS */ - -enum { - ETHTOOL_A_STATS_UNSPEC, - ETHTOOL_A_STATS_PAD, - ETHTOOL_A_STATS_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_STATS_GROUPS, /* bitset */ - - ETHTOOL_A_STATS_GRP, /* nest - _A_STATS_GRP_* */ - - ETHTOOL_A_STATS_SRC, /* u32 */ - - /* add new constants above here */ - __ETHTOOL_A_STATS_CNT, - ETHTOOL_A_STATS_MAX = (__ETHTOOL_A_STATS_CNT - 1) -}; - -enum { ETHTOOL_STATS_ETH_PHY, ETHTOOL_STATS_ETH_MAC, ETHTOOL_STATS_ETH_CTRL, ETHTOOL_STATS_RMON, + ETHTOOL_STATS_PHY, /* add new constants above here */ __ETHTOOL_STATS_CNT }; enum { - ETHTOOL_A_STATS_GRP_UNSPEC, - ETHTOOL_A_STATS_GRP_PAD, - - ETHTOOL_A_STATS_GRP_ID, /* u32 */ - ETHTOOL_A_STATS_GRP_SS_ID, /* u32 */ - - ETHTOOL_A_STATS_GRP_STAT, /* nest */ - - ETHTOOL_A_STATS_GRP_HIST_RX, /* nest */ - ETHTOOL_A_STATS_GRP_HIST_TX, /* nest */ - - ETHTOOL_A_STATS_GRP_HIST_BKT_LOW, /* u32 */ - ETHTOOL_A_STATS_GRP_HIST_BKT_HI, /* u32 */ - ETHTOOL_A_STATS_GRP_HIST_VAL, /* u64 */ - - /* add new constants above here */ - __ETHTOOL_A_STATS_GRP_CNT, - ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_GRP_CNT - 1) -}; - -enum { /* 30.3.2.1.5 aSymbolErrorDuringCarrier */ ETHTOOL_A_STATS_ETH_PHY_5_SYM_ERR, @@ -935,154 +194,18 @@ enum { ETHTOOL_A_STATS_RMON_MAX = (__ETHTOOL_A_STATS_RMON_CNT - 1) }; -/* MODULE */ - -enum { - ETHTOOL_A_MODULE_UNSPEC, - ETHTOOL_A_MODULE_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_MODULE_POWER_MODE_POLICY, /* u8 */ - ETHTOOL_A_MODULE_POWER_MODE, /* u8 */ - - /* add new constants above here */ - __ETHTOOL_A_MODULE_CNT, - ETHTOOL_A_MODULE_MAX = (__ETHTOOL_A_MODULE_CNT - 1) -}; - -/* Power Sourcing Equipment */ -enum { - ETHTOOL_A_C33_PSE_PW_LIMIT_UNSPEC, - ETHTOOL_A_C33_PSE_PW_LIMIT_MIN, /* u32 */ - ETHTOOL_A_C33_PSE_PW_LIMIT_MAX, /* u32 */ -}; - -enum { - ETHTOOL_A_PSE_UNSPEC, - ETHTOOL_A_PSE_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_PODL_PSE_ADMIN_STATE, /* u32 */ - ETHTOOL_A_PODL_PSE_ADMIN_CONTROL, /* u32 */ - ETHTOOL_A_PODL_PSE_PW_D_STATUS, /* u32 */ - ETHTOOL_A_C33_PSE_ADMIN_STATE, /* u32 */ - ETHTOOL_A_C33_PSE_ADMIN_CONTROL, /* u32 */ - ETHTOOL_A_C33_PSE_PW_D_STATUS, /* u32 */ - ETHTOOL_A_C33_PSE_PW_CLASS, /* u32 */ - ETHTOOL_A_C33_PSE_ACTUAL_PW, /* u32 */ - ETHTOOL_A_C33_PSE_EXT_STATE, /* u32 */ - ETHTOOL_A_C33_PSE_EXT_SUBSTATE, /* u32 */ - ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT, /* u32 */ - ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES, /* nest - _C33_PSE_PW_LIMIT_* */ - - /* add new constants above here */ - __ETHTOOL_A_PSE_CNT, - ETHTOOL_A_PSE_MAX = 
(__ETHTOOL_A_PSE_CNT - 1) -}; - -enum { - ETHTOOL_A_RSS_UNSPEC, - ETHTOOL_A_RSS_HEADER, - ETHTOOL_A_RSS_CONTEXT, /* u32 */ - ETHTOOL_A_RSS_HFUNC, /* u32 */ - ETHTOOL_A_RSS_INDIR, /* binary */ - ETHTOOL_A_RSS_HKEY, /* binary */ - ETHTOOL_A_RSS_INPUT_XFRM, /* u32 */ - ETHTOOL_A_RSS_START_CONTEXT, /* u32 */ - - __ETHTOOL_A_RSS_CNT, - ETHTOOL_A_RSS_MAX = (__ETHTOOL_A_RSS_CNT - 1), -}; - -/* PLCA */ - -enum { - ETHTOOL_A_PLCA_UNSPEC, - ETHTOOL_A_PLCA_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_PLCA_VERSION, /* u16 */ - ETHTOOL_A_PLCA_ENABLED, /* u8 */ - ETHTOOL_A_PLCA_STATUS, /* u8 */ - ETHTOOL_A_PLCA_NODE_CNT, /* u32 */ - ETHTOOL_A_PLCA_NODE_ID, /* u32 */ - ETHTOOL_A_PLCA_TO_TMR, /* u32 */ - ETHTOOL_A_PLCA_BURST_CNT, /* u32 */ - ETHTOOL_A_PLCA_BURST_TMR, /* u32 */ - - /* add new constants above here */ - __ETHTOOL_A_PLCA_CNT, - ETHTOOL_A_PLCA_MAX = (__ETHTOOL_A_PLCA_CNT - 1) -}; - -/* MAC Merge (802.3) */ - -enum { - ETHTOOL_A_MM_STAT_UNSPEC, - ETHTOOL_A_MM_STAT_PAD, - - /* aMACMergeFrameAssErrorCount */ - ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS, /* u64 */ - /* aMACMergeFrameSmdErrorCount */ - ETHTOOL_A_MM_STAT_SMD_ERRORS, /* u64 */ - /* aMACMergeFrameAssOkCount */ - ETHTOOL_A_MM_STAT_REASSEMBLY_OK, /* u64 */ - /* aMACMergeFragCountRx */ - ETHTOOL_A_MM_STAT_RX_FRAG_COUNT, /* u64 */ - /* aMACMergeFragCountTx */ - ETHTOOL_A_MM_STAT_TX_FRAG_COUNT, /* u64 */ - /* aMACMergeHoldCount */ - ETHTOOL_A_MM_STAT_HOLD_COUNT, /* u64 */ - - /* add new constants above here */ - __ETHTOOL_A_MM_STAT_CNT, - ETHTOOL_A_MM_STAT_MAX = (__ETHTOOL_A_MM_STAT_CNT - 1) -}; - -enum { - ETHTOOL_A_MM_UNSPEC, - ETHTOOL_A_MM_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_MM_PMAC_ENABLED, /* u8 */ - ETHTOOL_A_MM_TX_ENABLED, /* u8 */ - ETHTOOL_A_MM_TX_ACTIVE, /* u8 */ - ETHTOOL_A_MM_TX_MIN_FRAG_SIZE, /* u32 */ - ETHTOOL_A_MM_RX_MIN_FRAG_SIZE, /* u32 */ - ETHTOOL_A_MM_VERIFY_ENABLED, /* u8 */ - ETHTOOL_A_MM_VERIFY_STATUS, /* u8 */ - ETHTOOL_A_MM_VERIFY_TIME, /* u32 */ - ETHTOOL_A_MM_MAX_VERIFY_TIME, /* u32 */ - ETHTOOL_A_MM_STATS, /* nest - _A_MM_STAT_* */ - - /* add new constants above here */ - __ETHTOOL_A_MM_CNT, - ETHTOOL_A_MM_MAX = (__ETHTOOL_A_MM_CNT - 1) -}; - -/* MODULE_FW_FLASH */ - -enum { - ETHTOOL_A_MODULE_FW_FLASH_UNSPEC, - ETHTOOL_A_MODULE_FW_FLASH_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME, /* string */ - ETHTOOL_A_MODULE_FW_FLASH_PASSWORD, /* u32 */ - ETHTOOL_A_MODULE_FW_FLASH_STATUS, /* u32 */ - ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG, /* string */ - ETHTOOL_A_MODULE_FW_FLASH_DONE, /* uint */ - ETHTOOL_A_MODULE_FW_FLASH_TOTAL, /* uint */ - - /* add new constants above here */ - __ETHTOOL_A_MODULE_FW_FLASH_CNT, - ETHTOOL_A_MODULE_FW_FLASH_MAX = (__ETHTOOL_A_MODULE_FW_FLASH_CNT - 1) -}; - enum { - ETHTOOL_A_PHY_UNSPEC, - ETHTOOL_A_PHY_HEADER, /* nest - _A_HEADER_* */ - ETHTOOL_A_PHY_INDEX, /* u32 */ - ETHTOOL_A_PHY_DRVNAME, /* string */ - ETHTOOL_A_PHY_NAME, /* string */ - ETHTOOL_A_PHY_UPSTREAM_TYPE, /* u32 */ - ETHTOOL_A_PHY_UPSTREAM_INDEX, /* u32 */ - ETHTOOL_A_PHY_UPSTREAM_SFP_NAME, /* string */ - ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME, /* string */ + /* Basic packet counters if PHY has separate counters from the MAC */ + ETHTOOL_A_STATS_PHY_RX_PKTS, + ETHTOOL_A_STATS_PHY_RX_BYTES, + ETHTOOL_A_STATS_PHY_RX_ERRORS, + ETHTOOL_A_STATS_PHY_TX_PKTS, + ETHTOOL_A_STATS_PHY_TX_BYTES, + ETHTOOL_A_STATS_PHY_TX_ERRORS, /* add new constants above here */ - __ETHTOOL_A_PHY_CNT, - ETHTOOL_A_PHY_MAX = (__ETHTOOL_A_PHY_CNT - 1) + __ETHTOOL_A_STATS_PHY_CNT, + ETHTOOL_A_STATS_PHY_MAX = (__ETHTOOL_A_STATS_PHY_CNT 
- 1) }; /* generic netlink info */ diff --git a/include/uapi/linux/ethtool_netlink_generated.h b/include/uapi/linux/ethtool_netlink_generated.h new file mode 100644 index 000000000000..fe24c3459ac0 --- /dev/null +++ b/include/uapi/linux/ethtool_netlink_generated.h @@ -0,0 +1,821 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/ethtool.yaml */ +/* YNL-GEN uapi header */ + +#ifndef _UAPI_LINUX_ETHTOOL_NETLINK_GENERATED_H +#define _UAPI_LINUX_ETHTOOL_NETLINK_GENERATED_H + +#define ETHTOOL_FAMILY_NAME "ethtool" +#define ETHTOOL_FAMILY_VERSION 1 + +enum { + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN, + ETHTOOL_UDP_TUNNEL_TYPE_GENEVE, + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE, + + /* private: */ + __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + ETHTOOL_UDP_TUNNEL_TYPE_MAX = (__ETHTOOL_UDP_TUNNEL_TYPE_CNT - 1) +}; + +/** + * enum ethtool_header_flags - common ethtool header flags + * @ETHTOOL_FLAG_COMPACT_BITSETS: use compact bitsets in reply + * @ETHTOOL_FLAG_OMIT_REPLY: provide optional reply for SET or ACT requests + * @ETHTOOL_FLAG_STATS: request statistics, if supported by the driver + */ +enum ethtool_header_flags { + ETHTOOL_FLAG_COMPACT_BITSETS = 1, + ETHTOOL_FLAG_OMIT_REPLY = 2, + ETHTOOL_FLAG_STATS = 4, +}; + +enum { + ETHTOOL_PHY_UPSTREAM_TYPE_MAC, + ETHTOOL_PHY_UPSTREAM_TYPE_PHY, +}; + +enum ethtool_tcp_data_split { + ETHTOOL_TCP_DATA_SPLIT_UNKNOWN, + ETHTOOL_TCP_DATA_SPLIT_DISABLED, + ETHTOOL_TCP_DATA_SPLIT_ENABLED, +}; + +enum { + ETHTOOL_A_HEADER_UNSPEC, + ETHTOOL_A_HEADER_DEV_INDEX, + ETHTOOL_A_HEADER_DEV_NAME, + ETHTOOL_A_HEADER_FLAGS, + ETHTOOL_A_HEADER_PHY_INDEX, + + __ETHTOOL_A_HEADER_CNT, + ETHTOOL_A_HEADER_MAX = (__ETHTOOL_A_HEADER_CNT - 1) +}; + +enum { + ETHTOOL_A_BITSET_BIT_UNSPEC, + ETHTOOL_A_BITSET_BIT_INDEX, + ETHTOOL_A_BITSET_BIT_NAME, + ETHTOOL_A_BITSET_BIT_VALUE, + + __ETHTOOL_A_BITSET_BIT_CNT, + ETHTOOL_A_BITSET_BIT_MAX = (__ETHTOOL_A_BITSET_BIT_CNT - 1) +}; + +enum { + ETHTOOL_A_BITSET_BITS_UNSPEC, + ETHTOOL_A_BITSET_BITS_BIT, + + __ETHTOOL_A_BITSET_BITS_CNT, + ETHTOOL_A_BITSET_BITS_MAX = (__ETHTOOL_A_BITSET_BITS_CNT - 1) +}; + +enum { + ETHTOOL_A_BITSET_UNSPEC, + ETHTOOL_A_BITSET_NOMASK, + ETHTOOL_A_BITSET_SIZE, + ETHTOOL_A_BITSET_BITS, + ETHTOOL_A_BITSET_VALUE, + ETHTOOL_A_BITSET_MASK, + + __ETHTOOL_A_BITSET_CNT, + ETHTOOL_A_BITSET_MAX = (__ETHTOOL_A_BITSET_CNT - 1) +}; + +enum { + ETHTOOL_A_STRING_UNSPEC, + ETHTOOL_A_STRING_INDEX, + ETHTOOL_A_STRING_VALUE, + + __ETHTOOL_A_STRING_CNT, + ETHTOOL_A_STRING_MAX = (__ETHTOOL_A_STRING_CNT - 1) +}; + +enum { + ETHTOOL_A_STRINGS_UNSPEC, + ETHTOOL_A_STRINGS_STRING, + + __ETHTOOL_A_STRINGS_CNT, + ETHTOOL_A_STRINGS_MAX = (__ETHTOOL_A_STRINGS_CNT - 1) +}; + +enum { + ETHTOOL_A_STRINGSET_UNSPEC, + ETHTOOL_A_STRINGSET_ID, + ETHTOOL_A_STRINGSET_COUNT, + ETHTOOL_A_STRINGSET_STRINGS, + + __ETHTOOL_A_STRINGSET_CNT, + ETHTOOL_A_STRINGSET_MAX = (__ETHTOOL_A_STRINGSET_CNT - 1) +}; + +enum { + ETHTOOL_A_STRINGSETS_UNSPEC, + ETHTOOL_A_STRINGSETS_STRINGSET, + + __ETHTOOL_A_STRINGSETS_CNT, + ETHTOOL_A_STRINGSETS_MAX = (__ETHTOOL_A_STRINGSETS_CNT - 1) +}; + +enum { + ETHTOOL_A_STRSET_UNSPEC, + ETHTOOL_A_STRSET_HEADER, + ETHTOOL_A_STRSET_STRINGSETS, + ETHTOOL_A_STRSET_COUNTS_ONLY, + + __ETHTOOL_A_STRSET_CNT, + ETHTOOL_A_STRSET_MAX = (__ETHTOOL_A_STRSET_CNT - 1) +}; + +enum { + ETHTOOL_A_PRIVFLAGS_UNSPEC, + ETHTOOL_A_PRIVFLAGS_HEADER, + ETHTOOL_A_PRIVFLAGS_FLAGS, + + __ETHTOOL_A_PRIVFLAGS_CNT, + ETHTOOL_A_PRIVFLAGS_MAX = (__ETHTOOL_A_PRIVFLAGS_CNT - 1) 
+}; + +enum { + ETHTOOL_A_RINGS_UNSPEC, + ETHTOOL_A_RINGS_HEADER, + ETHTOOL_A_RINGS_RX_MAX, + ETHTOOL_A_RINGS_RX_MINI_MAX, + ETHTOOL_A_RINGS_RX_JUMBO_MAX, + ETHTOOL_A_RINGS_TX_MAX, + ETHTOOL_A_RINGS_RX, + ETHTOOL_A_RINGS_RX_MINI, + ETHTOOL_A_RINGS_RX_JUMBO, + ETHTOOL_A_RINGS_TX, + ETHTOOL_A_RINGS_RX_BUF_LEN, + ETHTOOL_A_RINGS_TCP_DATA_SPLIT, + ETHTOOL_A_RINGS_CQE_SIZE, + ETHTOOL_A_RINGS_TX_PUSH, + ETHTOOL_A_RINGS_RX_PUSH, + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN, + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX, + ETHTOOL_A_RINGS_HDS_THRESH, + ETHTOOL_A_RINGS_HDS_THRESH_MAX, + + __ETHTOOL_A_RINGS_CNT, + ETHTOOL_A_RINGS_MAX = (__ETHTOOL_A_RINGS_CNT - 1) +}; + +enum { + ETHTOOL_A_MM_STAT_UNSPEC, + ETHTOOL_A_MM_STAT_PAD, + ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS, + ETHTOOL_A_MM_STAT_SMD_ERRORS, + ETHTOOL_A_MM_STAT_REASSEMBLY_OK, + ETHTOOL_A_MM_STAT_RX_FRAG_COUNT, + ETHTOOL_A_MM_STAT_TX_FRAG_COUNT, + ETHTOOL_A_MM_STAT_HOLD_COUNT, + + __ETHTOOL_A_MM_STAT_CNT, + ETHTOOL_A_MM_STAT_MAX = (__ETHTOOL_A_MM_STAT_CNT - 1) +}; + +enum { + ETHTOOL_A_MM_UNSPEC, + ETHTOOL_A_MM_HEADER, + ETHTOOL_A_MM_PMAC_ENABLED, + ETHTOOL_A_MM_TX_ENABLED, + ETHTOOL_A_MM_TX_ACTIVE, + ETHTOOL_A_MM_TX_MIN_FRAG_SIZE, + ETHTOOL_A_MM_RX_MIN_FRAG_SIZE, + ETHTOOL_A_MM_VERIFY_ENABLED, + ETHTOOL_A_MM_VERIFY_STATUS, + ETHTOOL_A_MM_VERIFY_TIME, + ETHTOOL_A_MM_MAX_VERIFY_TIME, + ETHTOOL_A_MM_STATS, + + __ETHTOOL_A_MM_CNT, + ETHTOOL_A_MM_MAX = (__ETHTOOL_A_MM_CNT - 1) +}; + +enum { + ETHTOOL_A_LINKINFO_UNSPEC, + ETHTOOL_A_LINKINFO_HEADER, + ETHTOOL_A_LINKINFO_PORT, + ETHTOOL_A_LINKINFO_PHYADDR, + ETHTOOL_A_LINKINFO_TP_MDIX, + ETHTOOL_A_LINKINFO_TP_MDIX_CTRL, + ETHTOOL_A_LINKINFO_TRANSCEIVER, + + __ETHTOOL_A_LINKINFO_CNT, + ETHTOOL_A_LINKINFO_MAX = (__ETHTOOL_A_LINKINFO_CNT - 1) +}; + +enum { + ETHTOOL_A_LINKMODES_UNSPEC, + ETHTOOL_A_LINKMODES_HEADER, + ETHTOOL_A_LINKMODES_AUTONEG, + ETHTOOL_A_LINKMODES_OURS, + ETHTOOL_A_LINKMODES_PEER, + ETHTOOL_A_LINKMODES_SPEED, + ETHTOOL_A_LINKMODES_DUPLEX, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE, + ETHTOOL_A_LINKMODES_LANES, + ETHTOOL_A_LINKMODES_RATE_MATCHING, + + __ETHTOOL_A_LINKMODES_CNT, + ETHTOOL_A_LINKMODES_MAX = (__ETHTOOL_A_LINKMODES_CNT - 1) +}; + +enum { + ETHTOOL_A_LINKSTATE_UNSPEC, + ETHTOOL_A_LINKSTATE_HEADER, + ETHTOOL_A_LINKSTATE_LINK, + ETHTOOL_A_LINKSTATE_SQI, + ETHTOOL_A_LINKSTATE_SQI_MAX, + ETHTOOL_A_LINKSTATE_EXT_STATE, + ETHTOOL_A_LINKSTATE_EXT_SUBSTATE, + ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT, + + __ETHTOOL_A_LINKSTATE_CNT, + ETHTOOL_A_LINKSTATE_MAX = (__ETHTOOL_A_LINKSTATE_CNT - 1) +}; + +enum { + ETHTOOL_A_DEBUG_UNSPEC, + ETHTOOL_A_DEBUG_HEADER, + ETHTOOL_A_DEBUG_MSGMASK, + + __ETHTOOL_A_DEBUG_CNT, + ETHTOOL_A_DEBUG_MAX = (__ETHTOOL_A_DEBUG_CNT - 1) +}; + +enum { + ETHTOOL_A_WOL_UNSPEC, + ETHTOOL_A_WOL_HEADER, + ETHTOOL_A_WOL_MODES, + ETHTOOL_A_WOL_SOPASS, + + __ETHTOOL_A_WOL_CNT, + ETHTOOL_A_WOL_MAX = (__ETHTOOL_A_WOL_CNT - 1) +}; + +enum { + ETHTOOL_A_FEATURES_UNSPEC, + ETHTOOL_A_FEATURES_HEADER, + ETHTOOL_A_FEATURES_HW, + ETHTOOL_A_FEATURES_WANTED, + ETHTOOL_A_FEATURES_ACTIVE, + ETHTOOL_A_FEATURES_NOCHANGE, + + __ETHTOOL_A_FEATURES_CNT, + ETHTOOL_A_FEATURES_MAX = (__ETHTOOL_A_FEATURES_CNT - 1) +}; + +enum { + ETHTOOL_A_CHANNELS_UNSPEC, + ETHTOOL_A_CHANNELS_HEADER, + ETHTOOL_A_CHANNELS_RX_MAX, + ETHTOOL_A_CHANNELS_TX_MAX, + ETHTOOL_A_CHANNELS_OTHER_MAX, + ETHTOOL_A_CHANNELS_COMBINED_MAX, + ETHTOOL_A_CHANNELS_RX_COUNT, + ETHTOOL_A_CHANNELS_TX_COUNT, + ETHTOOL_A_CHANNELS_OTHER_COUNT, + ETHTOOL_A_CHANNELS_COMBINED_COUNT, + + __ETHTOOL_A_CHANNELS_CNT, + 
ETHTOOL_A_CHANNELS_MAX = (__ETHTOOL_A_CHANNELS_CNT - 1) +}; + +enum { + ETHTOOL_A_IRQ_MODERATION_UNSPEC, + ETHTOOL_A_IRQ_MODERATION_USEC, + ETHTOOL_A_IRQ_MODERATION_PKTS, + ETHTOOL_A_IRQ_MODERATION_COMPS, + + __ETHTOOL_A_IRQ_MODERATION_CNT, + ETHTOOL_A_IRQ_MODERATION_MAX = (__ETHTOOL_A_IRQ_MODERATION_CNT - 1) +}; + +enum { + ETHTOOL_A_PROFILE_UNSPEC, + ETHTOOL_A_PROFILE_IRQ_MODERATION, + + __ETHTOOL_A_PROFILE_CNT, + ETHTOOL_A_PROFILE_MAX = (__ETHTOOL_A_PROFILE_CNT - 1) +}; + +enum { + ETHTOOL_A_COALESCE_UNSPEC, + ETHTOOL_A_COALESCE_HEADER, + ETHTOOL_A_COALESCE_RX_USECS, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES, + ETHTOOL_A_COALESCE_RX_USECS_IRQ, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ, + ETHTOOL_A_COALESCE_TX_USECS, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES, + ETHTOOL_A_COALESCE_TX_USECS_IRQ, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ, + ETHTOOL_A_COALESCE_STATS_BLOCK_USECS, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX, + ETHTOOL_A_COALESCE_PKT_RATE_LOW, + ETHTOOL_A_COALESCE_RX_USECS_LOW, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW, + ETHTOOL_A_COALESCE_TX_USECS_LOW, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW, + ETHTOOL_A_COALESCE_PKT_RATE_HIGH, + ETHTOOL_A_COALESCE_RX_USECS_HIGH, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH, + ETHTOOL_A_COALESCE_TX_USECS_HIGH, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH, + ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL, + ETHTOOL_A_COALESCE_USE_CQE_MODE_TX, + ETHTOOL_A_COALESCE_USE_CQE_MODE_RX, + ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES, + ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES, + ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS, + ETHTOOL_A_COALESCE_RX_PROFILE, + ETHTOOL_A_COALESCE_TX_PROFILE, + + __ETHTOOL_A_COALESCE_CNT, + ETHTOOL_A_COALESCE_MAX = (__ETHTOOL_A_COALESCE_CNT - 1) +}; + +enum { + ETHTOOL_A_PAUSE_STAT_UNSPEC, + ETHTOOL_A_PAUSE_STAT_PAD, + ETHTOOL_A_PAUSE_STAT_TX_FRAMES, + ETHTOOL_A_PAUSE_STAT_RX_FRAMES, + + __ETHTOOL_A_PAUSE_STAT_CNT, + ETHTOOL_A_PAUSE_STAT_MAX = (__ETHTOOL_A_PAUSE_STAT_CNT - 1) +}; + +enum { + ETHTOOL_A_PAUSE_UNSPEC, + ETHTOOL_A_PAUSE_HEADER, + ETHTOOL_A_PAUSE_AUTONEG, + ETHTOOL_A_PAUSE_RX, + ETHTOOL_A_PAUSE_TX, + ETHTOOL_A_PAUSE_STATS, + ETHTOOL_A_PAUSE_STATS_SRC, + + __ETHTOOL_A_PAUSE_CNT, + ETHTOOL_A_PAUSE_MAX = (__ETHTOOL_A_PAUSE_CNT - 1) +}; + +enum { + ETHTOOL_A_EEE_UNSPEC, + ETHTOOL_A_EEE_HEADER, + ETHTOOL_A_EEE_MODES_OURS, + ETHTOOL_A_EEE_MODES_PEER, + ETHTOOL_A_EEE_ACTIVE, + ETHTOOL_A_EEE_ENABLED, + ETHTOOL_A_EEE_TX_LPI_ENABLED, + ETHTOOL_A_EEE_TX_LPI_TIMER, + + __ETHTOOL_A_EEE_CNT, + ETHTOOL_A_EEE_MAX = (__ETHTOOL_A_EEE_CNT - 1) +}; + +enum { + ETHTOOL_A_TS_STAT_UNSPEC, + ETHTOOL_A_TS_STAT_TX_PKTS, + ETHTOOL_A_TS_STAT_TX_LOST, + ETHTOOL_A_TS_STAT_TX_ERR, + ETHTOOL_A_TS_STAT_TX_ONESTEP_PKTS_UNCONFIRMED, + + __ETHTOOL_A_TS_STAT_CNT, + ETHTOOL_A_TS_STAT_MAX = (__ETHTOOL_A_TS_STAT_CNT - 1) +}; + +enum { + ETHTOOL_A_TS_HWTSTAMP_PROVIDER_UNSPEC, + ETHTOOL_A_TS_HWTSTAMP_PROVIDER_INDEX, + ETHTOOL_A_TS_HWTSTAMP_PROVIDER_QUALIFIER, + + __ETHTOOL_A_TS_HWTSTAMP_PROVIDER_CNT, + ETHTOOL_A_TS_HWTSTAMP_PROVIDER_MAX = (__ETHTOOL_A_TS_HWTSTAMP_PROVIDER_CNT - 1) +}; + +enum { + ETHTOOL_A_TSINFO_UNSPEC, + ETHTOOL_A_TSINFO_HEADER, + ETHTOOL_A_TSINFO_TIMESTAMPING, + ETHTOOL_A_TSINFO_TX_TYPES, + ETHTOOL_A_TSINFO_RX_FILTERS, + ETHTOOL_A_TSINFO_PHC_INDEX, + ETHTOOL_A_TSINFO_STATS, + ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER, + + __ETHTOOL_A_TSINFO_CNT, + ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_RESULT_UNSPEC, + ETHTOOL_A_CABLE_RESULT_PAIR, + ETHTOOL_A_CABLE_RESULT_CODE, + ETHTOOL_A_CABLE_RESULT_SRC, + + 
__ETHTOOL_A_CABLE_RESULT_CNT, + ETHTOOL_A_CABLE_RESULT_MAX = (__ETHTOOL_A_CABLE_RESULT_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC, + ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR, + ETHTOOL_A_CABLE_FAULT_LENGTH_CM, + ETHTOOL_A_CABLE_FAULT_LENGTH_SRC, + + __ETHTOOL_A_CABLE_FAULT_LENGTH_CNT, + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = (__ETHTOOL_A_CABLE_FAULT_LENGTH_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_NEST_UNSPEC, + ETHTOOL_A_CABLE_NEST_RESULT, + ETHTOOL_A_CABLE_NEST_FAULT_LENGTH, + + __ETHTOOL_A_CABLE_NEST_CNT, + ETHTOOL_A_CABLE_NEST_MAX = (__ETHTOOL_A_CABLE_NEST_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_TEST_UNSPEC, + ETHTOOL_A_CABLE_TEST_HEADER, + + __ETHTOOL_A_CABLE_TEST_CNT, + ETHTOOL_A_CABLE_TEST_MAX = (__ETHTOOL_A_CABLE_TEST_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_UNSPEC, + ETHTOOL_A_CABLE_TEST_NTF_HEADER, + ETHTOOL_A_CABLE_TEST_NTF_STATUS, + ETHTOOL_A_CABLE_TEST_NTF_NEST, + + __ETHTOOL_A_CABLE_TEST_NTF_CNT, + ETHTOOL_A_CABLE_TEST_NTF_MAX = (__ETHTOOL_A_CABLE_TEST_NTF_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_CFG_UNSPEC, + ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST, + ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST, + ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP, + ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR, + + __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT, + ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX = (__ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_NTF_UNSPEC, + ETHTOOL_A_CABLE_TEST_TDR_NTF_HEADER, + ETHTOOL_A_CABLE_TEST_TDR_NTF_STATUS, + ETHTOOL_A_CABLE_TEST_TDR_NTF_NEST, + + __ETHTOOL_A_CABLE_TEST_TDR_NTF_CNT, + ETHTOOL_A_CABLE_TEST_TDR_NTF_MAX = (__ETHTOOL_A_CABLE_TEST_TDR_NTF_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_UNSPEC, + ETHTOOL_A_CABLE_TEST_TDR_HEADER, + ETHTOOL_A_CABLE_TEST_TDR_CFG, + + __ETHTOOL_A_CABLE_TEST_TDR_CNT, + ETHTOOL_A_CABLE_TEST_TDR_MAX = (__ETHTOOL_A_CABLE_TEST_TDR_CNT - 1) +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC, + ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT, + ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE, + + __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT, + ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = (__ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT - 1) +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC, + ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, + ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES, + ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY, + + __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT, + ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = (__ETHTOOL_A_TUNNEL_UDP_TABLE_CNT - 1) +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_UNSPEC, + ETHTOOL_A_TUNNEL_UDP_TABLE, + + __ETHTOOL_A_TUNNEL_UDP_CNT, + ETHTOOL_A_TUNNEL_UDP_MAX = (__ETHTOOL_A_TUNNEL_UDP_CNT - 1) +}; + +enum { + ETHTOOL_A_TUNNEL_INFO_UNSPEC, + ETHTOOL_A_TUNNEL_INFO_HEADER, + ETHTOOL_A_TUNNEL_INFO_UDP_PORTS, + + __ETHTOOL_A_TUNNEL_INFO_CNT, + ETHTOOL_A_TUNNEL_INFO_MAX = (__ETHTOOL_A_TUNNEL_INFO_CNT - 1) +}; + +enum { + ETHTOOL_A_FEC_STAT_UNSPEC, + ETHTOOL_A_FEC_STAT_PAD, + ETHTOOL_A_FEC_STAT_CORRECTED, + ETHTOOL_A_FEC_STAT_UNCORR, + ETHTOOL_A_FEC_STAT_CORR_BITS, + + __ETHTOOL_A_FEC_STAT_CNT, + ETHTOOL_A_FEC_STAT_MAX = (__ETHTOOL_A_FEC_STAT_CNT - 1) +}; + +enum { + ETHTOOL_A_FEC_UNSPEC, + ETHTOOL_A_FEC_HEADER, + ETHTOOL_A_FEC_MODES, + ETHTOOL_A_FEC_AUTO, + ETHTOOL_A_FEC_ACTIVE, + ETHTOOL_A_FEC_STATS, + + __ETHTOOL_A_FEC_CNT, + ETHTOOL_A_FEC_MAX = (__ETHTOOL_A_FEC_CNT - 1) +}; + +enum { + ETHTOOL_A_MODULE_EEPROM_UNSPEC, + ETHTOOL_A_MODULE_EEPROM_HEADER, + ETHTOOL_A_MODULE_EEPROM_OFFSET, + ETHTOOL_A_MODULE_EEPROM_LENGTH, + ETHTOOL_A_MODULE_EEPROM_PAGE, + ETHTOOL_A_MODULE_EEPROM_BANK, + ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS, + ETHTOOL_A_MODULE_EEPROM_DATA, + + __ETHTOOL_A_MODULE_EEPROM_CNT, + ETHTOOL_A_MODULE_EEPROM_MAX = 
(__ETHTOOL_A_MODULE_EEPROM_CNT - 1) +}; + +enum { + ETHTOOL_A_STATS_GRP_UNSPEC, + ETHTOOL_A_STATS_GRP_PAD, + ETHTOOL_A_STATS_GRP_ID, + ETHTOOL_A_STATS_GRP_SS_ID, + ETHTOOL_A_STATS_GRP_STAT, + ETHTOOL_A_STATS_GRP_HIST_RX, + ETHTOOL_A_STATS_GRP_HIST_TX, + ETHTOOL_A_STATS_GRP_HIST_BKT_LOW, + ETHTOOL_A_STATS_GRP_HIST_BKT_HI, + ETHTOOL_A_STATS_GRP_HIST_VAL, + + __ETHTOOL_A_STATS_GRP_CNT, + ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_GRP_CNT - 1) +}; + +enum { + ETHTOOL_A_STATS_UNSPEC, + ETHTOOL_A_STATS_PAD, + ETHTOOL_A_STATS_HEADER, + ETHTOOL_A_STATS_GROUPS, + ETHTOOL_A_STATS_GRP, + ETHTOOL_A_STATS_SRC, + + __ETHTOOL_A_STATS_CNT, + ETHTOOL_A_STATS_MAX = (__ETHTOOL_A_STATS_CNT - 1) +}; + +enum { + ETHTOOL_A_PHC_VCLOCKS_UNSPEC, + ETHTOOL_A_PHC_VCLOCKS_HEADER, + ETHTOOL_A_PHC_VCLOCKS_NUM, + ETHTOOL_A_PHC_VCLOCKS_INDEX, + + __ETHTOOL_A_PHC_VCLOCKS_CNT, + ETHTOOL_A_PHC_VCLOCKS_MAX = (__ETHTOOL_A_PHC_VCLOCKS_CNT - 1) +}; + +enum { + ETHTOOL_A_MODULE_UNSPEC, + ETHTOOL_A_MODULE_HEADER, + ETHTOOL_A_MODULE_POWER_MODE_POLICY, + ETHTOOL_A_MODULE_POWER_MODE, + + __ETHTOOL_A_MODULE_CNT, + ETHTOOL_A_MODULE_MAX = (__ETHTOOL_A_MODULE_CNT - 1) +}; + +enum { + ETHTOOL_A_C33_PSE_PW_LIMIT_UNSPEC, + ETHTOOL_A_C33_PSE_PW_LIMIT_MIN, + ETHTOOL_A_C33_PSE_PW_LIMIT_MAX, + + __ETHTOOL_A_C33_PSE_PW_LIMIT_CNT, + __ETHTOOL_A_C33_PSE_PW_LIMIT_MAX = (__ETHTOOL_A_C33_PSE_PW_LIMIT_CNT - 1) +}; + +enum { + ETHTOOL_A_PSE_UNSPEC, + ETHTOOL_A_PSE_HEADER, + ETHTOOL_A_PODL_PSE_ADMIN_STATE, + ETHTOOL_A_PODL_PSE_ADMIN_CONTROL, + ETHTOOL_A_PODL_PSE_PW_D_STATUS, + ETHTOOL_A_C33_PSE_ADMIN_STATE, + ETHTOOL_A_C33_PSE_ADMIN_CONTROL, + ETHTOOL_A_C33_PSE_PW_D_STATUS, + ETHTOOL_A_C33_PSE_PW_CLASS, + ETHTOOL_A_C33_PSE_ACTUAL_PW, + ETHTOOL_A_C33_PSE_EXT_STATE, + ETHTOOL_A_C33_PSE_EXT_SUBSTATE, + ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT, + ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES, + + __ETHTOOL_A_PSE_CNT, + ETHTOOL_A_PSE_MAX = (__ETHTOOL_A_PSE_CNT - 1) +}; + +enum { + ETHTOOL_A_RSS_UNSPEC, + ETHTOOL_A_RSS_HEADER, + ETHTOOL_A_RSS_CONTEXT, + ETHTOOL_A_RSS_HFUNC, + ETHTOOL_A_RSS_INDIR, + ETHTOOL_A_RSS_HKEY, + ETHTOOL_A_RSS_INPUT_XFRM, + ETHTOOL_A_RSS_START_CONTEXT, + + __ETHTOOL_A_RSS_CNT, + ETHTOOL_A_RSS_MAX = (__ETHTOOL_A_RSS_CNT - 1) +}; + +enum { + ETHTOOL_A_PLCA_UNSPEC, + ETHTOOL_A_PLCA_HEADER, + ETHTOOL_A_PLCA_VERSION, + ETHTOOL_A_PLCA_ENABLED, + ETHTOOL_A_PLCA_STATUS, + ETHTOOL_A_PLCA_NODE_CNT, + ETHTOOL_A_PLCA_NODE_ID, + ETHTOOL_A_PLCA_TO_TMR, + ETHTOOL_A_PLCA_BURST_CNT, + ETHTOOL_A_PLCA_BURST_TMR, + + __ETHTOOL_A_PLCA_CNT, + ETHTOOL_A_PLCA_MAX = (__ETHTOOL_A_PLCA_CNT - 1) +}; + +enum { + ETHTOOL_A_MODULE_FW_FLASH_UNSPEC, + ETHTOOL_A_MODULE_FW_FLASH_HEADER, + ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME, + ETHTOOL_A_MODULE_FW_FLASH_PASSWORD, + ETHTOOL_A_MODULE_FW_FLASH_STATUS, + ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG, + ETHTOOL_A_MODULE_FW_FLASH_DONE, + ETHTOOL_A_MODULE_FW_FLASH_TOTAL, + + __ETHTOOL_A_MODULE_FW_FLASH_CNT, + ETHTOOL_A_MODULE_FW_FLASH_MAX = (__ETHTOOL_A_MODULE_FW_FLASH_CNT - 1) +}; + +enum { + ETHTOOL_A_PHY_UNSPEC, + ETHTOOL_A_PHY_HEADER, + ETHTOOL_A_PHY_INDEX, + ETHTOOL_A_PHY_DRVNAME, + ETHTOOL_A_PHY_NAME, + ETHTOOL_A_PHY_UPSTREAM_TYPE, + ETHTOOL_A_PHY_UPSTREAM_INDEX, + ETHTOOL_A_PHY_UPSTREAM_SFP_NAME, + ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME, + + __ETHTOOL_A_PHY_CNT, + ETHTOOL_A_PHY_MAX = (__ETHTOOL_A_PHY_CNT - 1) +}; + +enum { + ETHTOOL_A_TSCONFIG_UNSPEC, + ETHTOOL_A_TSCONFIG_HEADER, + ETHTOOL_A_TSCONFIG_HWTSTAMP_PROVIDER, + ETHTOOL_A_TSCONFIG_TX_TYPES, + ETHTOOL_A_TSCONFIG_RX_FILTERS, + ETHTOOL_A_TSCONFIG_HWTSTAMP_FLAGS, + + __ETHTOOL_A_TSCONFIG_CNT, + 
ETHTOOL_A_TSCONFIG_MAX = (__ETHTOOL_A_TSCONFIG_CNT - 1)
+};
+
+enum {
+	ETHTOOL_MSG_USER_NONE = 0,
+	ETHTOOL_MSG_STRSET_GET = 1,
+	ETHTOOL_MSG_LINKINFO_GET,
+	ETHTOOL_MSG_LINKINFO_SET,
+	ETHTOOL_MSG_LINKMODES_GET,
+	ETHTOOL_MSG_LINKMODES_SET,
+	ETHTOOL_MSG_LINKSTATE_GET,
+	ETHTOOL_MSG_DEBUG_GET,
+	ETHTOOL_MSG_DEBUG_SET,
+	ETHTOOL_MSG_WOL_GET,
+	ETHTOOL_MSG_WOL_SET,
+	ETHTOOL_MSG_FEATURES_GET,
+	ETHTOOL_MSG_FEATURES_SET,
+	ETHTOOL_MSG_PRIVFLAGS_GET,
+	ETHTOOL_MSG_PRIVFLAGS_SET,
+	ETHTOOL_MSG_RINGS_GET,
+	ETHTOOL_MSG_RINGS_SET,
+	ETHTOOL_MSG_CHANNELS_GET,
+	ETHTOOL_MSG_CHANNELS_SET,
+	ETHTOOL_MSG_COALESCE_GET,
+	ETHTOOL_MSG_COALESCE_SET,
+	ETHTOOL_MSG_PAUSE_GET,
+	ETHTOOL_MSG_PAUSE_SET,
+	ETHTOOL_MSG_EEE_GET,
+	ETHTOOL_MSG_EEE_SET,
+	ETHTOOL_MSG_TSINFO_GET,
+	ETHTOOL_MSG_CABLE_TEST_ACT,
+	ETHTOOL_MSG_CABLE_TEST_TDR_ACT,
+	ETHTOOL_MSG_TUNNEL_INFO_GET,
+	ETHTOOL_MSG_FEC_GET,
+	ETHTOOL_MSG_FEC_SET,
+	ETHTOOL_MSG_MODULE_EEPROM_GET,
+	ETHTOOL_MSG_STATS_GET,
+	ETHTOOL_MSG_PHC_VCLOCKS_GET,
+	ETHTOOL_MSG_MODULE_GET,
+	ETHTOOL_MSG_MODULE_SET,
+	ETHTOOL_MSG_PSE_GET,
+	ETHTOOL_MSG_PSE_SET,
+	ETHTOOL_MSG_RSS_GET,
+	ETHTOOL_MSG_PLCA_GET_CFG,
+	ETHTOOL_MSG_PLCA_SET_CFG,
+	ETHTOOL_MSG_PLCA_GET_STATUS,
+	ETHTOOL_MSG_MM_GET,
+	ETHTOOL_MSG_MM_SET,
+	ETHTOOL_MSG_MODULE_FW_FLASH_ACT,
+	ETHTOOL_MSG_PHY_GET,
+	ETHTOOL_MSG_TSCONFIG_GET,
+	ETHTOOL_MSG_TSCONFIG_SET,
+
+	__ETHTOOL_MSG_USER_CNT,
+	ETHTOOL_MSG_USER_MAX = (__ETHTOOL_MSG_USER_CNT - 1)
+};
+
+enum {
+	ETHTOOL_MSG_KERNEL_NONE = 0,
+	ETHTOOL_MSG_STRSET_GET_REPLY = 1,
+	ETHTOOL_MSG_LINKINFO_GET_REPLY,
+	ETHTOOL_MSG_LINKINFO_NTF,
+	ETHTOOL_MSG_LINKMODES_GET_REPLY,
+	ETHTOOL_MSG_LINKMODES_NTF,
+	ETHTOOL_MSG_LINKSTATE_GET_REPLY,
+	ETHTOOL_MSG_DEBUG_GET_REPLY,
+	ETHTOOL_MSG_DEBUG_NTF,
+	ETHTOOL_MSG_WOL_GET_REPLY,
+	ETHTOOL_MSG_WOL_NTF,
+	ETHTOOL_MSG_FEATURES_GET_REPLY,
+	ETHTOOL_MSG_FEATURES_SET_REPLY,
+	ETHTOOL_MSG_FEATURES_NTF,
+	ETHTOOL_MSG_PRIVFLAGS_GET_REPLY,
+	ETHTOOL_MSG_PRIVFLAGS_NTF,
+	ETHTOOL_MSG_RINGS_GET_REPLY,
+	ETHTOOL_MSG_RINGS_NTF,
+	ETHTOOL_MSG_CHANNELS_GET_REPLY,
+	ETHTOOL_MSG_CHANNELS_NTF,
+	ETHTOOL_MSG_COALESCE_GET_REPLY,
+	ETHTOOL_MSG_COALESCE_NTF,
+	ETHTOOL_MSG_PAUSE_GET_REPLY,
+	ETHTOOL_MSG_PAUSE_NTF,
+	ETHTOOL_MSG_EEE_GET_REPLY,
+	ETHTOOL_MSG_EEE_NTF,
+	ETHTOOL_MSG_TSINFO_GET_REPLY,
+	ETHTOOL_MSG_CABLE_TEST_NTF,
+	ETHTOOL_MSG_CABLE_TEST_TDR_NTF,
+	ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY,
+	ETHTOOL_MSG_FEC_GET_REPLY,
+	ETHTOOL_MSG_FEC_NTF,
+	ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY,
+	ETHTOOL_MSG_STATS_GET_REPLY,
+	ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY,
+	ETHTOOL_MSG_MODULE_GET_REPLY,
+	ETHTOOL_MSG_MODULE_NTF,
+	ETHTOOL_MSG_PSE_GET_REPLY,
+	ETHTOOL_MSG_RSS_GET_REPLY,
+	ETHTOOL_MSG_PLCA_GET_CFG_REPLY,
+	ETHTOOL_MSG_PLCA_GET_STATUS_REPLY,
+	ETHTOOL_MSG_PLCA_NTF,
+	ETHTOOL_MSG_MM_GET_REPLY,
+	ETHTOOL_MSG_MM_NTF,
+	ETHTOOL_MSG_MODULE_FW_FLASH_NTF,
+	ETHTOOL_MSG_PHY_GET_REPLY,
+	ETHTOOL_MSG_PHY_NTF,
+	ETHTOOL_MSG_TSCONFIG_GET_REPLY,
+	ETHTOOL_MSG_TSCONFIG_SET_REPLY,
+
+	__ETHTOOL_MSG_KERNEL_CNT,
+	ETHTOOL_MSG_KERNEL_MAX = (__ETHTOOL_MSG_KERNEL_CNT - 1)
+};
+
+#endif /* _UAPI_LINUX_ETHTOOL_NETLINK_GENERATED_H */
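Editor's note: the generated header keeps ethtool-netlink's long-standing sentinel convention, where each attribute set ends with a private __*_CNT value and a public *_MAX equal to __*_CNT - 1. A minimal sketch of how a consumer typically relies on that pattern once the header is installed; the attribute constants are real, but the name table and program are illustrative:

#include <stdio.h>
#include <linux/ethtool_netlink_generated.h>

/* Name table sized by the public _MAX sentinel; designated initializers
 * keep the mapping correct even as new attributes are appended. */
static const char * const linkstate_names[ETHTOOL_A_LINKSTATE_MAX + 1] = {
	[ETHTOOL_A_LINKSTATE_LINK]         = "link",
	[ETHTOOL_A_LINKSTATE_SQI]          = "sqi",
	[ETHTOOL_A_LINKSTATE_SQI_MAX]      = "sqi-max",
	[ETHTOOL_A_LINKSTATE_EXT_STATE]    = "ext-state",
	[ETHTOOL_A_LINKSTATE_EXT_SUBSTATE] = "ext-substate",
	[ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT] = "ext-down-count",
};

int main(void)
{
	for (int i = 1; i <= ETHTOOL_A_LINKSTATE_MAX; i++)
		if (linkstate_names[i])
			printf("%d: %s\n", i, linkstate_names[i]);
	return 0;
}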
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
index 34f221d3a1b9..bd8167979707 100644
--- a/include/uapi/linux/fanotify.h
+++ b/include/uapi/linux/fanotify.h
@@ -25,6 +25,9 @@
 #define FAN_OPEN_PERM		0x00010000	/* File open in perm check */
 #define FAN_ACCESS_PERM		0x00020000	/* File accessed in perm check */
 #define FAN_OPEN_EXEC_PERM	0x00040000	/* File open/exec in perm check */
+/* #define FAN_DIR_MODIFY	0x00080000 */	/* Deprecated (reserved) */
+
+#define FAN_PRE_ACCESS		0x00100000	/* Pre-content access hook */
 
 #define FAN_EVENT_ON_CHILD	0x08000000	/* Interested in child events */
 
@@ -143,6 +146,7 @@ struct fanotify_event_metadata {
 #define FAN_EVENT_INFO_TYPE_DFID	3
 #define FAN_EVENT_INFO_TYPE_PIDFD	4
 #define FAN_EVENT_INFO_TYPE_ERROR	5
+#define FAN_EVENT_INFO_TYPE_RANGE	6
 
 /* Special info types for FAN_RENAME */
 #define FAN_EVENT_INFO_TYPE_OLD_DFID_NAME	10
@@ -189,6 +193,13 @@ struct fanotify_event_info_error {
 	__u32 error_count;
 };
 
+struct fanotify_event_info_range {
+	struct fanotify_event_info_header hdr;
+	__u32 pad;
+	__u64 offset;
+	__u64 count;
+};
+
 /*
  * User space may need to record additional information about its decision.
  * The extra information type records what kind of information is included.
@@ -224,6 +235,13 @@ struct fanotify_response_info_audit_rule {
 /* Legit userspace responses to a _PERM event */
 #define FAN_ALLOW	0x01
 #define FAN_DENY	0x02
+/* errno other than EPERM can be specified in upper byte of deny response */
+#define FAN_ERRNO_BITS	8
+#define FAN_ERRNO_SHIFT	(32 - FAN_ERRNO_BITS)
+#define FAN_ERRNO_MASK	((1 << FAN_ERRNO_BITS) - 1)
+#define FAN_DENY_ERRNO(err) \
+	(FAN_DENY | ((((__u32)(err)) & FAN_ERRNO_MASK) << FAN_ERRNO_SHIFT))
+
 #define FAN_AUDIT	0x10	/* Bitmask to create audit record for result */
 #define FAN_INFO	0x20	/* Bitmask to indicate additional information */
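Editor's note: the new FAN_PRE_ACCESS hook carries a FAN_EVENT_INFO_TYPE_RANGE record describing the byte range about to be accessed, and FAN_DENY_ERRNO() lets a listener fail the access with a chosen errno instead of the historical hard-wired EPERM. A minimal sketch of the response path, assuming `group_fd` was created with fanotify_init(FAN_CLASS_PRE_CONTENT, ...) and `md` points at a queued FAN_PRE_ACCESS event; the new constants come from the hunk above and may not be in installed libc headers yet:

#include <errno.h>
#include <unistd.h>
#include <linux/fanotify.h>

static int deny_with_eio(int group_fd, const struct fanotify_event_metadata *md)
{
	struct fanotify_response rsp = {
		.fd = md->fd,
		/* FAN_DENY plus an errno packed into the top byte */
		.response = FAN_DENY_ERRNO(EIO),
	};

	if (write(group_fd, &rsp, sizeof(rsp)) != sizeof(rsp))
		return -errno;
	close(md->fd);	/* always close the event fd when done */
	return 0;
}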
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index 6e6907e63bfc..a15ac2fa4b20 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -155,4 +155,8 @@
 #define AT_HANDLE_MNT_ID_UNIQUE	0x001	/* Return the u64 unique mount ID. */
 #define AT_HANDLE_CONNECTABLE	0x002	/* Request a connectable file handle */
 
+/* Flags for execveat2(2). */
+#define AT_EXECVE_CHECK	0x10000	/* Only perform a check if execution
+				   would be allowed. */
+
 #endif /* _UAPI_LINUX_FCNTL_H */
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index a6924dd3aff1..00e9890ca3c0 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -68,6 +68,8 @@ enum {
 	FRA_SPORT_RANGE,	/* sport */
 	FRA_DPORT_RANGE,	/* dport */
 	FRA_DSCP,		/* dscp */
+	FRA_FLOWLABEL,		/* flowlabel */
+	FRA_FLOWLABEL_MASK,	/* flowlabel mask */
 	__FRA_MAX
 };
diff --git a/include/uapi/linux/fiemap.h b/include/uapi/linux/fiemap.h
index 24ca0c00cae3..9d9e8ae32b41 100644
--- a/include/uapi/linux/fiemap.h
+++ b/include/uapi/linux/fiemap.h
@@ -14,37 +14,56 @@
 
 #include <linux/types.h>
 
+/**
+ * struct fiemap_extent - description of one fiemap extent
+ * @fe_logical: byte offset of the extent in the file
+ * @fe_physical: byte offset of extent on disk
+ * @fe_length: length in bytes for this extent
+ * @fe_flags: FIEMAP_EXTENT_* flags for this extent
+ */
 struct fiemap_extent {
-	__u64 fe_logical;  /* logical offset in bytes for the start of
-			    * the extent from the beginning of the file */
-	__u64 fe_physical; /* physical offset in bytes for the start
-			    * of the extent from the beginning of the disk */
-	__u64 fe_length;   /* length in bytes for this extent */
+	__u64 fe_logical;
+	__u64 fe_physical;
+	__u64 fe_length;
+	/* private: */
 	__u64 fe_reserved64[2];
-	__u32 fe_flags;    /* FIEMAP_EXTENT_* flags for this extent */
+	/* public: */
+	__u32 fe_flags;
+	/* private: */
 	__u32 fe_reserved[3];
 };
 
+/**
+ * struct fiemap - file extent mappings
+ * @fm_start: byte offset (inclusive) at which to start mapping (in)
+ * @fm_length: logical length of mapping which userspace wants (in)
+ * @fm_flags: FIEMAP_FLAG_* flags for request (in/out)
+ * @fm_mapped_extents: number of extents that were mapped (out)
+ * @fm_extent_count: size of fm_extents array (in)
+ * @fm_extents: array of mapped extents (out)
+ */
 struct fiemap {
-	__u64 fm_start;		/* logical offset (inclusive) at
-				 * which to start mapping (in) */
-	__u64 fm_length;	/* logical length of mapping which
-				 * userspace wants (in) */
-	__u32 fm_flags;		/* FIEMAP_FLAG_* flags for request (in/out) */
-	__u32 fm_mapped_extents;/* number of extents that were mapped (out) */
-	__u32 fm_extent_count;  /* size of fm_extents array (in) */
+	__u64 fm_start;
+	__u64 fm_length;
+	__u32 fm_flags;
+	__u32 fm_mapped_extents;
+	__u32 fm_extent_count;
+	/* private: */
 	__u32 fm_reserved;
-	struct fiemap_extent fm_extents[]; /* array of mapped extents (out) */
+	/* public: */
+	struct fiemap_extent fm_extents[];
 };
 
 #define FIEMAP_MAX_OFFSET	(~0ULL)
 
+/* flags used in fm_flags: */
 #define FIEMAP_FLAG_SYNC	0x00000001 /* sync file data before map */
 #define FIEMAP_FLAG_XATTR	0x00000002 /* map extended attribute tree */
 #define FIEMAP_FLAG_CACHE	0x00000004 /* request caching of the extents */
 
 #define FIEMAP_FLAGS_COMPAT	(FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR)
 
+/* flags used in fe_flags: */
 #define FIEMAP_EXTENT_LAST	0x00000001 /* Last extent in file. */
 #define FIEMAP_EXTENT_UNKNOWN	0x00000002 /* Data location unknown. */
 #define FIEMAP_EXTENT_DELALLOC	0x00000004 /* Location still pending.
					    * Sets EXTENT_UNKNOWN. */
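Editor's note: the fiemap hunk above only converts the field comments to kernel-doc; the structures and ioctl ABI are unchanged. A minimal sketch of a caller exercising them (error handling trimmed; FS_IOC_FIEMAP comes from <linux/fs.h>):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	int fd = open(argv[1], O_RDONLY);
	__u32 n = 32;	/* fm_extent_count: room for 32 extents */
	struct fiemap *fm = calloc(1, sizeof(*fm) + n * sizeof(struct fiemap_extent));

	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;	/* sync file data before map */
	fm->fm_extent_count = n;

	if (fd < 0 || ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("fiemap");
		return 1;
	}
	for (__u32 i = 0; i < fm->fm_mapped_extents; i++)
		printf("ext %u: logical %llu physical %llu len %llu flags %#x\n",
		       i, (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);
	return 0;
}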
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 753971770733..2bbe00cf1248 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -40,6 +40,15 @@
 #define BLOCK_SIZE_BITS 10
 #define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)
 
+/* flags for integrity meta */
+#define IO_INTEGRITY_CHK_GUARD		(1U << 0) /* enforce guard check */
+#define IO_INTEGRITY_CHK_REFTAG		(1U << 1) /* enforce ref check */
+#define IO_INTEGRITY_CHK_APPTAG		(1U << 2) /* enforce app check */
+
+#define IO_INTEGRITY_VALID_FLAGS (IO_INTEGRITY_CHK_GUARD | \
+				  IO_INTEGRITY_CHK_REFTAG | \
+				  IO_INTEGRITY_CHK_APPTAG)
+
 #define SEEK_SET	0	/* seek relative to beginning of file */
 #define SEEK_CUR	1	/* seek relative to current file position */
 #define SEEK_END	2	/* seek relative to end of file */
@@ -332,9 +341,13 @@ typedef int __bitwise __kernel_rwf_t;
 /* Atomic Write */
 #define RWF_ATOMIC	((__force __kernel_rwf_t)0x00000040)
 
+/* buffered IO that drops the cache after reading or writing data */
+#define RWF_DONTCACHE	((__force __kernel_rwf_t)0x00000080)
+
 /* mask of flags supported by the kernel */
 #define RWF_SUPPORTED	(RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
-			 RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC)
+			 RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC |\
+			 RWF_DONTCACHE)
 
 #define PROCFS_IOCTL_MAGIC 'f'
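Editor's note: RWF_DONTCACHE gives buffered I/O drop-behind semantics; the write still goes through the page cache, but the pages are reclaimed once writeback completes. A minimal sketch using the pwritev2(2) wrapper; the fallback #define mirrors the hunk above for systems whose libc headers predate it, and kernels without support are expected to fail the call with EOPNOTSUPP:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <sys/uio.h>

#ifndef RWF_DONTCACHE
#define RWF_DONTCACHE 0x00000080	/* value from the fs.h hunk above */
#endif

int main(void)
{
	int fd = open("scratch.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	static char buf[1 << 16];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

	memset(buf, 'x', sizeof(buf));
	/* buffered write; offset -1 means use the current file offset */
	if (fd < 0 || pwritev2(fd, &iov, 1, -1, RWF_DONTCACHE) < 0)
		perror("pwritev2(RWF_DONTCACHE)");
	return 0;
}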
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 2575e0cd9b48..bfe880fbbb24 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -1315,6 +1315,8 @@ enum {
 	IFLA_NETKIT_MODE,
 	IFLA_NETKIT_SCRUB,
 	IFLA_NETKIT_PEER_SCRUB,
+	IFLA_NETKIT_HEADROOM,
+	IFLA_NETKIT_TAILROOM,
 	__IFLA_NETKIT_MAX,
 };
 #define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1)
@@ -1394,6 +1396,7 @@ enum {
 	IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */
 	IFLA_VXLAN_LOCALBYPASS,
 	IFLA_VXLAN_LABEL_POLICY, /* IPv6 flow label policy; ifla_vxlan_label_policy */
+	IFLA_VXLAN_RESERVED_BITS,
 	__IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX	(__IFLA_VXLAN_MAX - 1)
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 5d32d53508d9..ced0fc3c3aa5 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -79,6 +79,8 @@ enum {
 #define IPPROTO_MPLS		IPPROTO_MPLS
   IPPROTO_ETHERNET = 143,	/* Ethernet-within-IPv6 Encapsulation */
 #define IPPROTO_ETHERNET	IPPROTO_ETHERNET
+  IPPROTO_AGGFRAG = 144,	/* AGGFRAG in ESP (RFC 9347) */
+#define IPPROTO_AGGFRAG		IPPROTO_AGGFRAG
   IPPROTO_RAW = 255,		/* Raw IP packets */
 #define IPPROTO_RAW		IPPROTO_RAW
   IPPROTO_SMC = 256,		/* Shared Memory Communications */
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index a4206723f503..5a199f3d4a26 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -519,6 +519,7 @@
 #define KEY_NOTIFICATION_CENTER	0x1bc	/* Show/hide the notification center */
 #define KEY_PICKUP_PHONE	0x1bd	/* Answer incoming call */
 #define KEY_HANGUP_PHONE	0x1be	/* Decline incoming call */
+#define KEY_LINK_PHONE		0x1bf	/* AL Phone Syncing */
 
 #define KEY_DEL_EOL		0x1c0
 #define KEY_DEL_EOS		0x1c1
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index aac9a4f8fa9a..e11c82638527 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -98,6 +98,10 @@ struct io_uring_sqe {
 		__u64	addr3;
 		__u64	__pad2[1];
 	};
+	struct {
+		__u64	attr_ptr;	/* pointer to attribute information */
+		__u64	attr_type_mask;	/* bit mask of attributes */
+	};
 	__u64	optval;
 	/*
 	 * If the ring is initialized with IORING_SETUP_SQE128, then
@@ -107,6 +111,18 @@ struct io_uring_sqe {
 	};
 };
 
+/* sqe->attr_type_mask flags */
+#define IORING_RW_ATTR_FLAG_PI	(1U << 0)
+/* PI attribute information */
+struct io_uring_attr_pi {
+	__u16	flags;
+	__u16	app_tag;
+	__u32	len;
+	__u64	addr;
+	__u64	seed;
+	__u64	rsvd;
+};
+
 /*
  * If sqe->file_index is set to this for opcodes that instantiate a new
  * direct descriptor (like openat/openat2/accept), then io_uring will allocate
@@ -561,6 +577,7 @@ struct io_uring_params {
 #define IORING_FEAT_REG_REG_RING	(1U << 13)
 #define IORING_FEAT_RECVSEND_BUNDLE	(1U << 14)
 #define IORING_FEAT_MIN_TIMEOUT		(1U << 15)
+#define IORING_FEAT_RW_ATTR		(1U << 16)
 
 /*
  * io_uring_register(2) opcodes and arguments
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 34810f6ae2b5..78747b24bd0f 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -868,6 +868,7 @@ enum iommu_hwpt_pgfault_perm {
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @perm: Combination of enum iommu_hwpt_pgfault_perm
+ * @__reserved: Must be 0.
 * @addr: Fault address
 * @length: a hint of how much data the requestor is expecting to fetch. For
 *          example, if the PRI initiator knows it is going to do a 10MB
@@ -883,7 +884,8 @@ struct iommu_hwpt_pgfault {
 	__u32 pasid;
 	__u32 grpid;
 	__u32 perm;
-	__u64 addr;
+	__u32 __reserved;
+	__aligned_u64 addr;
 	__u32 length;
 	__u32 cookie;
 };
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 283dec7e3645..5bd7ce934d74 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -137,6 +137,22 @@ struct ip_beet_phdr {
 	__u8 reserved;
 };
 
+struct ip_iptfs_hdr {
+	__u8 subtype;		/* 0*: basic, 1: CC */
+	__u8 flags;
+	__be16 block_offset;
+};
+
+struct ip_iptfs_cc_hdr {
+	__u8 subtype;		/* 0: basic, 1*: CC */
+	__u8 flags;
+	__be16 block_offset;
+	__be32 loss_rate;
+	__be64 rtt_adelay_xdelay;
+	__be32 tval;
+	__be32 techo;
+};
+
 /* index values for the variables in ipv4_devconf */
 enum {
diff --git a/include/uapi/linux/ipsec.h b/include/uapi/linux/ipsec.h
index 50d8ee1791e2..696b790f4346 100644
--- a/include/uapi/linux/ipsec.h
+++ b/include/uapi/linux/ipsec.h
@@ -14,7 +14,8 @@ enum {
 	IPSEC_MODE_ANY		= 0,	/* We do not support this for SA */
 	IPSEC_MODE_TRANSPORT	= 1,
 	IPSEC_MODE_TUNNEL	= 2,
-	IPSEC_MODE_BEET		= 3
+	IPSEC_MODE_BEET		= 3,
+	IPSEC_MODE_IPTFS	= 4
 };
 
 enum {
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 502ea63b5d2e..45e6d8fca9b9 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -617,10 +617,6 @@ struct kvm_ioeventfd {
 #define KVM_X86_DISABLE_EXITS_HLT	(1 << 1)
 #define KVM_X86_DISABLE_EXITS_PAUSE	(1 << 2)
 #define KVM_X86_DISABLE_EXITS_CSTATE	(1 << 3)
-#define KVM_X86_DISABLE_VALID_EXITS	(KVM_X86_DISABLE_EXITS_MWAIT | \
-					 KVM_X86_DISABLE_EXITS_HLT | \
-					 KVM_X86_DISABLE_EXITS_PAUSE | \
-					 KVM_X86_DISABLE_EXITS_CSTATE)
 
 /* for KVM_ENABLE_CAP */
 struct kvm_enable_cap {
@@ -1070,6 +1066,10 @@ struct kvm_dirty_tlb {
 
 #define KVM_REG_SIZE_SHIFT	52
 #define KVM_REG_SIZE_MASK	0x00f0000000000000ULL
+
+#define KVM_REG_SIZE(id)	\
+	(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
 #define KVM_REG_SIZE_U8		0x0000000000000000ULL
 #define KVM_REG_SIZE_U16	0x0010000000000000ULL
 #define KVM_REG_SIZE_U32	0x0020000000000000ULL
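Editor's note: KVM_REG_SIZE() is now defined next to the KVM_REG_SIZE_* encodings it decodes, so any KVM_{GET,SET}_ONE_REG caller can size its buffers from the register id alone. The macro extracts bits 55:52 of the id and turns them into a byte count; a tiny sketch (the register index 0x42 is a made-up placeholder):

#include <stdio.h>
#include <linux/kvm.h>

int main(void)
{
	/* a hypothetical register id carrying a 64-bit size encoding */
	__u64 id = KVM_REG_SIZE_U64 | 0x42;

	/* size nibble 3 -> 1U << 3 = 8 bytes */
	printf("%llu\n", (unsigned long long)KVM_REG_SIZE(id));
	return 0;
}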
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index f0d3f268240d..6975f182b22c 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -125,6 +125,7 @@
 #define MDIO_STAT1_LPOWERABLE		0x0002	/* Low-power ability */
 #define MDIO_STAT1_LSTATUS		BMSR_LSTATUS
 #define MDIO_STAT1_FAULT		0x0080	/* Fault */
+#define MDIO_PCS_STAT1_CLKSTOP_CAP	0x0040
 #define MDIO_AN_STAT1_LPABLE		0x0001	/* Link partner AN ability */
 #define MDIO_AN_STAT1_ABLE		BMSR_ANEGCAPABLE
 #define MDIO_AN_STAT1_RFAULT		BMSR_RFAULT
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 858339d1c1c4..55b0ab51096c 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -13,6 +13,17 @@
 #include <linux/types.h>
 #include <linux/socket.h>   /* for SO_TIMESTAMPING */
 
+/*
+ * Possible types of hwtstamp provider. The default, "precise", is for
+ * IEEE 1588 quality timestamps; "approx" is for timestamps taken at the
+ * NIC's DMA point.
+ */
+enum hwtstamp_provider_qualifier {
+	HWTSTAMP_PROVIDER_QUALIFIER_PRECISE,
+	HWTSTAMP_PROVIDER_QUALIFIER_APPROX,
+
+	HWTSTAMP_PROVIDER_QUALIFIER_CNT,
+};
+
 /* SO_TIMESTAMPING flags */
 enum {
 	SOF_TIMESTAMPING_TX_HARDWARE = (1<<0),
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index c2ac7269acf7..43233af75b9d 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -57,6 +57,7 @@ enum ctattr_type {
 	CTA_SYNPROXY,
 	CTA_FILTER,
 	CTA_STATUS_MASK,
+	CTA_TIMESTAMP_EVENT,
 	__CTA_MAX
 };
 #define CTA_MAX (__CTA_MAX - 1)
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index caf4db2fcbb9..4273e0249fcb 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -58,7 +58,7 @@
 #define NFS4_SHARE_DENY_BOTH	0x0003
 
 /* nfs41 */
-#define NFS4_SHARE_WANT_MASK		0xFF00
+#define NFS4_SHARE_WANT_TYPE_MASK	0xFF00
 #define NFS4_SHARE_WANT_NO_PREFERENCE	0x0000
 #define NFS4_SHARE_WANT_READ_DELEG	0x0100
 #define NFS4_SHARE_WANT_WRITE_DELEG	0x0200
@@ -66,13 +66,16 @@
 #define NFS4_SHARE_WANT_NO_DELEG	0x0400
 #define NFS4_SHARE_WANT_CANCEL		0x0500
 
-#define NFS4_SHARE_WHEN_MASK	0xF0000
+#define NFS4_SHARE_WHEN_MASK		0xF0000
 #define NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL	0x10000
 #define NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED		0x20000
 
+#define NFS4_SHARE_WANT_MOD_MASK	0xF00000
 #define NFS4_SHARE_WANT_DELEG_TIMESTAMPS	0x100000
 #define NFS4_SHARE_WANT_OPEN_XOR_DELEGATION	0x200000
 
+#define NFS4_SHARE_WANT_MASK	(NFS4_SHARE_WANT_TYPE_MASK | NFS4_SHARE_WANT_MOD_MASK)
+
 #define NFS4_CDFC4_FORE	0x1
 #define NFS4_CDFC4_BACK	0x2
 #define NFS4_CDFC4_BOTH	0x3
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 6d11437596b9..f6c1b181c886 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1329,6 +1329,13 @@
 *	%NL80211_ATTR_MLO_TTLM_ULINK attributes are used to specify the
 *	TID to Link mapping for downlink/uplink traffic.
 *
+ * @NL80211_CMD_ASSOC_MLO_RECONF: For a non-AP MLD station, request to
+ *	add/remove links to/from the association.
+ *
+ * @NL80211_CMD_EPCS_CFG: EPCS configuration for a station. Used by userland to
+ *	control EPCS configuration. Used to notify userland on the current state
+ *	of EPCS.
+ *
 * @NL80211_CMD_MAX: highest used command number
 * @__NL80211_CMD_AFTER_LAST: internal use
 */
@@ -1586,6 +1593,9 @@ enum nl80211_commands {
 
 	NL80211_CMD_SET_TID_TO_LINK_MAPPING,
 
+	NL80211_CMD_ASSOC_MLO_RECONF,
+	NL80211_CMD_EPCS_CFG,
+
 	/* add new commands above here */
 
 	/* used to define NL80211_CMD_MAX below */
@@ -2871,6 +2881,18 @@ enum nl80211_commands {
 * @NL80211_ATTR_VIF_RADIO_MASK: Bitmask of allowed radios (u32).
 *	A value of 0 means all radios.
 *
+ * @NL80211_ATTR_SUPPORTED_SELECTORS: supported selectors, array of
+ *	supported selectors as defined by IEEE 802.11 7.3.2.2 but without the
+ *	length restriction (at most %NL80211_MAX_SUPP_SELECTORS).
+ *	This can be used to provide a list of selectors that are implemented
+ *	by the supplicant. If not given, support for SAE_H2E is assumed.
+ *
+ * @NL80211_ATTR_MLO_RECONF_REM_LINKS: (u16) A bitmask of the links requested
+ *	to be removed from the MLO association.
+ *
+ * @NL80211_ATTR_EPCS: Flag attribute indicating that EPCS is enabled for a
+ *	station interface.
+ *
 * @NUM_NL80211_ATTR: total number of nl80211_attrs available
 * @NL80211_ATTR_MAX: highest attribute number currently defined
 * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3421,6 +3443,11 @@ enum nl80211_attrs {
 
 	NL80211_ATTR_VIF_RADIO_MASK,
 
+	NL80211_ATTR_SUPPORTED_SELECTORS,
+
+	NL80211_ATTR_MLO_RECONF_REM_LINKS,
+	NL80211_ATTR_EPCS,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -3465,6 +3492,7 @@ enum nl80211_attrs {
 #define NL80211_WIPHY_NAME_MAXLEN		64
 
 #define NL80211_MAX_SUPP_RATES			32
+#define NL80211_MAX_SUPP_SELECTORS		128
 #define NL80211_MAX_SUPP_HT_RATES		77
 #define NL80211_MAX_SUPP_REG_RULES		128
 #define NL80211_TKIP_DATA_OFFSET_ENCR_KEY	0
diff --git a/include/uapi/linux/ntsync.h b/include/uapi/linux/ntsync.h
index dcfa38fdc93c..6d06793512b1 100644
--- a/include/uapi/linux/ntsync.h
+++ b/include/uapi/linux/ntsync.h
@@ -11,13 +11,49 @@
 #include <linux/types.h>
 
 struct ntsync_sem_args {
-	__u32 sem;
 	__u32 count;
 	__u32 max;
 };
 
-#define NTSYNC_IOC_CREATE_SEM	_IOWR('N', 0x80, struct ntsync_sem_args)
+struct ntsync_mutex_args {
+	__u32 owner;
+	__u32 count;
+};
+
+struct ntsync_event_args {
+	__u32 manual;
+	__u32 signaled;
+};
+
+#define NTSYNC_WAIT_REALTIME	0x1
+
+struct ntsync_wait_args {
+	__u64 timeout;
+	__u64 objs;
+	__u32 count;
+	__u32 index;
+	__u32 flags;
+	__u32 owner;
+	__u32 alert;
+	__u32 pad;
+};
+
+#define NTSYNC_MAX_WAIT_COUNT 64
+
+#define NTSYNC_IOC_CREATE_SEM	_IOW ('N', 0x80, struct ntsync_sem_args)
+#define NTSYNC_IOC_WAIT_ANY	_IOWR('N', 0x82, struct ntsync_wait_args)
+#define NTSYNC_IOC_WAIT_ALL	_IOWR('N', 0x83, struct ntsync_wait_args)
+#define NTSYNC_IOC_CREATE_MUTEX	_IOW ('N', 0x84, struct ntsync_mutex_args)
+#define NTSYNC_IOC_CREATE_EVENT	_IOW ('N', 0x87, struct ntsync_event_args)
 
-#define NTSYNC_IOC_SEM_POST	_IOWR('N', 0x81, __u32)
+#define NTSYNC_IOC_SEM_RELEASE	_IOWR('N', 0x81, __u32)
+#define NTSYNC_IOC_MUTEX_UNLOCK	_IOWR('N', 0x85, struct ntsync_mutex_args)
+#define NTSYNC_IOC_MUTEX_KILL	_IOW ('N', 0x86, __u32)
+#define NTSYNC_IOC_EVENT_SET	_IOR ('N', 0x88, __u32)
+#define NTSYNC_IOC_EVENT_RESET	_IOR ('N', 0x89, __u32)
+#define NTSYNC_IOC_EVENT_PULSE	_IOR ('N', 0x8a, __u32)
+#define NTSYNC_IOC_SEM_READ	_IOR ('N', 0x8b, struct ntsync_sem_args)
+#define NTSYNC_IOC_MUTEX_READ	_IOR ('N', 0x8c, struct ntsync_mutex_args)
+#define NTSYNC_IOC_EVENT_READ	_IOR ('N', 0x8d, struct ntsync_event_args)
 
 #endif
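Editor's note: the reworked ntsync ABI drops the `sem` handle from ntsync_sem_args because object-creation ioctls now return a file descriptor directly, and NTSYNC_IOC_SEM_POST becomes NTSYNC_IOC_SEM_RELEASE to match Windows naming. A minimal sketch of a semaphore round-trip, assuming the driver's /dev/ntsync character device is present:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ntsync.h>

int main(void)
{
	int dev = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	struct ntsync_sem_args sem = { .count = 0, .max = 2 };
	__u32 release = 1;
	int sem_fd;

	if (dev < 0) { perror("open /dev/ntsync"); return 1; }

	/* creation ioctls return a new fd representing the object */
	sem_fd = ioctl(dev, NTSYNC_IOC_CREATE_SEM, &sem);
	if (sem_fd < 0) { perror("NTSYNC_IOC_CREATE_SEM"); return 1; }

	/* add one token; the pre-release count comes back in 'release' */
	if (ioctl(sem_fd, NTSYNC_IOC_SEM_RELEASE, &release) < 0)
		perror("NTSYNC_IOC_SEM_RELEASE");
	else
		printf("previous count: %u\n", release);

	close(sem_fd);
	close(dev);
	return 0;
}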
Supported Link Speeds) */ #define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ #define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ #define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ @@ -665,6 +665,7 @@ #define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */ #define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */ #define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */ +#define PCI_EXP_DEVCAP2_EE_PREFIX_MAX 0x00c00000 /* Max End-End TLP Prefixes */ #define PCI_EXP_DEVCTL2 0x28 /* Device Control 2 */ #define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */ #define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */ @@ -789,10 +790,11 @@ /* Same bits as above */ #define PCI_ERR_CAP 0x18 /* Advanced Error Capabilities & Ctrl*/ #define PCI_ERR_CAP_FEP(x) ((x) & 0x1f) /* First Error Pointer */ -#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */ -#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */ -#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */ -#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */ +#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */ +#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */ +#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */ +#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */ +#define PCI_ERR_CAP_PREFIX_LOG_PRESENT 0x00000800 /* TLP Prefix Log Present */ #define PCI_ERR_HEADER_LOG 0x1c /* Header Log Register (16 bytes) */ #define PCI_ERR_ROOT_COMMAND 0x2c /* Root Error Command */ #define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */ @@ -808,6 +810,7 @@ #define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */ #define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */ #define PCI_ERR_ROOT_ERR_SRC 0x34 /* Error Source Identification */ +#define PCI_ERR_PREFIX_LOG 0x38 /* TLP Prefix LOG Register (up to 16 bytes) */ /* Virtual Channel */ #define PCI_VC_PORT_CAP1 0x04 @@ -1001,9 +1004,6 @@ #define PCI_ACS_CTRL 0x06 /* ACS Control Register */ #define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */ -#define PCI_VSEC_HDR 4 /* extended cap - vendor-specific */ -#define PCI_VSEC_HDR_LEN_SHIFT 20 /* shift for length field */ - /* SATA capability */ #define PCI_SATA_REGS 4 /* SATA REGs specifier */ #define PCI_SATA_REGS_MASK 0xF /* location - BAR#/inline */ diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h index 94b46b043b53..acd261f49866 100644 --- a/include/uapi/linux/pcitest.h +++ b/include/uapi/linux/pcitest.h @@ -20,6 +20,7 @@ #define PCITEST_MSIX _IOW('P', 0x7, int) #define PCITEST_SET_IRQTYPE _IOW('P', 0x8, int) #define PCITEST_GET_IRQTYPE _IO('P', 0x9) +#define PCITEST_BARS _IO('P', 0xa) #define PCITEST_CLEAR_IRQ _IO('P', 0x10) #define PCITEST_FLAGS_USE_DMA 0x00000001 diff --git a/include/uapi/linux/pps_gen.h b/include/uapi/linux/pps_gen.h new file mode 100644 index 000000000000..60a5d0fcfa68 --- /dev/null +++ b/include/uapi/linux/pps_gen.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * PPS generator API header + * + * Copyright (C) 2024 Rodolfo Giometti <giometti@enneenne.com> + */ + +#ifndef _PPS_GEN_H_ +#define _PPS_GEN_H_ + +#include <linux/types.h> +#include <linux/ioctl.h> + +/** + * struct pps_gen_event - the PPS generator events + * @event: the event 
type + * @sequence: the event sequence number + * + * Userspace can get the last PPS generator event by using the + * ioctl(pps_gen, PPS_GEN_FETCHEVENT, ...) syscall. + * The sequence field can be used to save the last event ID, while in the + * event field is stored the last event type. Currently known event is: + * + * PPS_GEN_EVENT_MISSEDPULSE : last pulse was not generated + */ +struct pps_gen_event { + unsigned int event; + unsigned int sequence; +}; + +#define PPS_GEN_EVENT_MISSEDPULSE 1 + +#define PPS_GEN_SETENABLE _IOW('p', 0xb1, unsigned int *) +#define PPS_GEN_USESYSTEMCLOCK _IOR('p', 0xb2, unsigned int *) +#define PPS_GEN_FETCHEVENT _IOR('p', 0xb3, struct pps_gen_event *) + +#endif /* _PPS_GEN_H_ */ diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index 5a43c23f53bf..ff47b6f0ba0f 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -233,7 +233,7 @@ struct mdp_superblock_1 { char set_name[32]; /* set and interpreted by user-space */ __le64 ctime; /* lo 40 bits are seconds, top 24 are microseconds or 0*/ - __le32 level; /* 0,1,4,5 */ + __le32 level; /* 0,1,4,5, -1 (linear) */ __le32 layout; /* only for raid5 and raid10 currently */ __le64 size; /* used size of component devices, in 512byte sectors */ diff --git a/include/uapi/linux/raid/md_u.h b/include/uapi/linux/raid/md_u.h index 7be89a4906e7..a893010735fb 100644 --- a/include/uapi/linux/raid/md_u.h +++ b/include/uapi/linux/raid/md_u.h @@ -103,6 +103,8 @@ typedef struct mdu_array_info_s { } mdu_array_info_t; +#define LEVEL_LINEAR (-1) + /* we need a value for 'no level specified' and 0 * means 'raid0', so we need something else. This is * for internal use only diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index db7254d52d93..66c3903d29cf 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -93,10 +93,18 @@ enum { RTM_NEWPREFIX = 52, #define RTM_NEWPREFIX RTM_NEWPREFIX - RTM_GETMULTICAST = 58, + RTM_NEWMULTICAST = 56, +#define RTM_NEWMULTICAST RTM_NEWMULTICAST + RTM_DELMULTICAST, +#define RTM_DELMULTICAST RTM_DELMULTICAST + RTM_GETMULTICAST, #define RTM_GETMULTICAST RTM_GETMULTICAST - RTM_GETANYCAST = 62, + RTM_NEWANYCAST = 60, +#define RTM_NEWANYCAST RTM_NEWANYCAST + RTM_DELANYCAST, +#define RTM_DELANYCAST RTM_DELANYCAST + RTM_GETANYCAST, #define RTM_GETANYCAST RTM_GETANYCAST RTM_NEWNEIGHTBL = 64, @@ -389,6 +397,7 @@ enum rtattr_type_t { RTA_SPORT, RTA_DPORT, RTA_NH_ID, + RTA_FLOWLABEL, __RTA_MAX }; @@ -774,6 +783,12 @@ enum rtnetlink_groups { #define RTNLGRP_TUNNEL RTNLGRP_TUNNEL RTNLGRP_STATS, #define RTNLGRP_STATS RTNLGRP_STATS + RTNLGRP_IPV4_MCADDR, +#define RTNLGRP_IPV4_MCADDR RTNLGRP_IPV4_MCADDR + RTNLGRP_IPV6_MCADDR, +#define RTNLGRP_IPV6_MCADDR RTNLGRP_IPV6_MCADDR + RTNLGRP_IPV6_ACADDR, +#define RTNLGRP_IPV6_ACADDR RTNLGRP_IPV6_ACADDR __RTNLGRP_MAX }; #define RTNLGRP_MAX (__RTNLGRP_MAX - 1) diff --git a/include/uapi/linux/securebits.h b/include/uapi/linux/securebits.h index d6d98877ff1a..3fba30dbd68b 100644 --- a/include/uapi/linux/securebits.h +++ b/include/uapi/linux/securebits.h @@ -52,10 +52,32 @@ #define SECBIT_NO_CAP_AMBIENT_RAISE_LOCKED \ (issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE_LOCKED)) +/* See Documentation/userspace-api/check_exec.rst */ +#define SECURE_EXEC_RESTRICT_FILE 8 +#define SECURE_EXEC_RESTRICT_FILE_LOCKED 9 /* make bit-8 immutable */ + +#define SECBIT_EXEC_RESTRICT_FILE (issecure_mask(SECURE_EXEC_RESTRICT_FILE)) +#define SECBIT_EXEC_RESTRICT_FILE_LOCKED \ + 
(issecure_mask(SECURE_EXEC_RESTRICT_FILE_LOCKED)) + +/* See Documentation/userspace-api/check_exec.rst */ +#define SECURE_EXEC_DENY_INTERACTIVE 10 +#define SECURE_EXEC_DENY_INTERACTIVE_LOCKED 11 /* make bit-10 immutable */ + +#define SECBIT_EXEC_DENY_INTERACTIVE \ + (issecure_mask(SECURE_EXEC_DENY_INTERACTIVE)) +#define SECBIT_EXEC_DENY_INTERACTIVE_LOCKED \ + (issecure_mask(SECURE_EXEC_DENY_INTERACTIVE_LOCKED)) + #define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \ issecure_mask(SECURE_NO_SETUID_FIXUP) | \ issecure_mask(SECURE_KEEP_CAPS) | \ - issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE)) + issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE) | \ + issecure_mask(SECURE_EXEC_RESTRICT_FILE) | \ + issecure_mask(SECURE_EXEC_DENY_INTERACTIVE)) #define SECURE_ALL_LOCKS (SECURE_ALL_BITS << 1) +#define SECURE_ALL_UNPRIVILEGED (issecure_mask(SECURE_EXEC_RESTRICT_FILE) | \ + issecure_mask(SECURE_EXEC_DENY_INTERACTIVE)) + #endif /* _UAPI_LINUX_SECUREBITS_H */ diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index adf5fd78dd50..848c7784e684 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -186,6 +186,7 @@ enum LINUX_MIB_TIMEWAITKILLED, /* TimeWaitKilled */ LINUX_MIB_PAWSACTIVEREJECTED, /* PAWSActiveRejected */ LINUX_MIB_PAWSESTABREJECTED, /* PAWSEstabRejected */ + LINUX_MIB_PAWS_OLD_ACK, /* PAWSOldAck */ LINUX_MIB_DELAYEDACKS, /* DelayedACKs */ LINUX_MIB_DELAYEDACKLOCKED, /* DelayedACKLocked */ LINUX_MIB_DELAYEDACKLOST, /* DelayedACKLost */ @@ -339,6 +340,8 @@ enum LINUX_MIB_XFRMACQUIREERROR, /* XfrmAcquireError */ LINUX_MIB_XFRMOUTSTATEDIRERROR, /* XfrmOutStateDirError */ LINUX_MIB_XFRMINSTATEDIRERROR, /* XfrmInStateDirError */ + LINUX_MIB_XFRMINIPTFSERROR, /* XfrmInIptfsError */ + LINUX_MIB_XFRMOUTNOQSPACE, /* XfrmOutNoQueueSpace */ __LINUX_MIB_XFRMMAX }; @@ -358,6 +361,11 @@ enum LINUX_MIB_TLSRXDEVICERESYNC, /* TlsRxDeviceResync */ LINUX_MIB_TLSDECRYPTRETRY, /* TlsDecryptRetry */ LINUX_MIB_TLSRXNOPADVIOL, /* TlsRxNoPadViolation */ + LINUX_MIB_TLSRXREKEYOK, /* TlsRxRekeyOk */ + LINUX_MIB_TLSRXREKEYERROR, /* TlsRxRekeyError */ + LINUX_MIB_TLSTXREKEYOK, /* TlsTxRekeyOk */ + LINUX_MIB_TLSTXREKEYERROR, /* TlsTxRekeyError */ + LINUX_MIB_TLSRXREKEYRECEIVED, /* TlsRxRekeyReceived */ __LINUX_MIB_TLSMAX }; diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index 887a25286441..f78ee3670dd5 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -98,43 +98,93 @@ struct statx_timestamp { */ struct statx { /* 0x00 */ - __u32 stx_mask; /* What results were written [uncond] */ - __u32 stx_blksize; /* Preferred general I/O size [uncond] */ - __u64 stx_attributes; /* Flags conveying information about the file [uncond] */ + /* What results were written [uncond] */ + __u32 stx_mask; + + /* Preferred general I/O size [uncond] */ + __u32 stx_blksize; + + /* Flags conveying information about the file [uncond] */ + __u64 stx_attributes; + /* 0x10 */ - __u32 stx_nlink; /* Number of hard links */ - __u32 stx_uid; /* User ID of owner */ - __u32 stx_gid; /* Group ID of owner */ - __u16 stx_mode; /* File mode */ + /* Number of hard links */ + __u32 stx_nlink; + + /* User ID of owner */ + __u32 stx_uid; + + /* Group ID of owner */ + __u32 stx_gid; + + /* File mode */ + __u16 stx_mode; __u16 __spare0[1]; + /* 0x20 */ - __u64 stx_ino; /* Inode number */ - __u64 stx_size; /* File size */ - __u64 stx_blocks; /* Number of 512-byte blocks allocated */ - __u64 stx_attributes_mask; /* Mask to show what's supported in stx_attributes */ + /* Inode number */ + __u64 
stx_ino; + + /* File size */ + __u64 stx_size; + + /* Number of 512-byte blocks allocated */ + __u64 stx_blocks; + + /* Mask to show what's supported in stx_attributes */ + __u64 stx_attributes_mask; + /* 0x40 */ - struct statx_timestamp stx_atime; /* Last access time */ - struct statx_timestamp stx_btime; /* File creation time */ - struct statx_timestamp stx_ctime; /* Last attribute change time */ - struct statx_timestamp stx_mtime; /* Last data modification time */ + /* Last access time */ + struct statx_timestamp stx_atime; + + /* File creation time */ + struct statx_timestamp stx_btime; + + /* Last attribute change time */ + struct statx_timestamp stx_ctime; + + /* Last data modification time */ + struct statx_timestamp stx_mtime; + /* 0x80 */ - __u32 stx_rdev_major; /* Device ID of special file [if bdev/cdev] */ + /* Device ID of special file [if bdev/cdev] */ + __u32 stx_rdev_major; __u32 stx_rdev_minor; - __u32 stx_dev_major; /* ID of device containing file [uncond] */ + + /* ID of device containing file [uncond] */ + __u32 stx_dev_major; __u32 stx_dev_minor; + /* 0x90 */ __u64 stx_mnt_id; - __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */ - __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */ + + /* Memory buffer alignment for direct I/O */ + __u32 stx_dio_mem_align; + + /* File offset alignment for direct I/O */ + __u32 stx_dio_offset_align; + /* 0xa0 */ - __u64 stx_subvol; /* Subvolume identifier */ - __u32 stx_atomic_write_unit_min; /* Min atomic write unit in bytes */ - __u32 stx_atomic_write_unit_max; /* Max atomic write unit in bytes */ + /* Subvolume identifier */ + __u64 stx_subvol; + + /* Min atomic write unit in bytes */ + __u32 stx_atomic_write_unit_min; + + /* Max atomic write unit in bytes */ + __u32 stx_atomic_write_unit_max; + /* 0xb0 */ - __u32 stx_atomic_write_segments_max; /* Max atomic write segment count */ - __u32 __spare1[1]; + /* Max atomic write segment count */ + __u32 stx_atomic_write_segments_max; + + /* File offset alignment for direct I/O reads */ + __u32 stx_dio_read_offset_align; + /* 0xb8 */ __u64 __spare3[9]; /* Spare space for future expansion */ + /* 0x100 */ }; @@ -164,6 +214,7 @@ struct statx { #define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */ #define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */ #define STATX_WRITE_ATOMIC 0x00010000U /* Want/got atomic_write_* fields */ +#define STATX_DIO_READ_ALIGN 0x00020000U /* Want/got dio read alignment info */ #define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h index b50b2eb257a0..934e20ef7f79 100644 --- a/include/uapi/linux/taskstats.h +++ b/include/uapi/linux/taskstats.h @@ -72,6 +72,8 @@ struct taskstats { */ __u64 cpu_count __attribute__((aligned(8))); __u64 cpu_delay_total; + __u64 cpu_delay_max; + __u64 cpu_delay_min; /* Following four fields atomically updated using task->delays->lock */ @@ -80,10 +82,14 @@ struct taskstats { */ __u64 blkio_count; __u64 blkio_delay_total; + __u64 blkio_delay_max; + __u64 blkio_delay_min; /* Delay waiting for page fault I/O (swap in only) */ __u64 swapin_count; __u64 swapin_delay_total; + __u64 swapin_delay_max; + __u64 swapin_delay_min; /* cpu "wall-clock" running time * On some architectures, value will adjust for cpu time stolen @@ -166,10 +172,14 @@ struct taskstats { /* Delay waiting for memory reclaim */ __u64 freepages_count; __u64 freepages_delay_total; + __u64 
freepages_delay_max; + __u64 freepages_delay_min; /* Delay waiting for thrashing page */ __u64 thrashing_count; __u64 thrashing_delay_total; + __u64 thrashing_delay_max; + __u64 thrashing_delay_min; /* v10: 64-bit btime to avoid overflow */ __u64 ac_btime64; /* 64-bit begin time */ @@ -177,6 +187,8 @@ struct taskstats { /* v11: Delay waiting for memory compact */ __u64 compact_count; __u64 compact_delay_total; + __u64 compact_delay_max; + __u64 compact_delay_min; /* v12 begin */ __u32 ac_tgid; /* thread group ID */ @@ -198,10 +210,15 @@ struct taskstats { /* v13: Delay waiting for write-protect copy */ __u64 wpcopy_count; __u64 wpcopy_delay_total; + __u64 wpcopy_delay_max; + __u64 wpcopy_delay_min; /* v14: Delay waiting for IRQ/SOFTIRQ */ __u64 irq_count; __u64 irq_delay_total; + __u64 irq_delay_max; + __u64 irq_delay_min; + /* v15: add Delay max */ }; diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h index 2ebdba111a8f..beef1752e36e 100644 --- a/include/uapi/linux/usb/functionfs.h +++ b/include/uapi/linux/usb/functionfs.h @@ -206,7 +206,7 @@ struct usb_ffs_dmabuf_transfer_req { * +-----+-----------------+------+--------------------------+ * | off | name | type | description | * +-----+-----------------+------+--------------------------+ - * | 0 | inteface | U8 | related interface number | + * | 0 | interface | U8 | related interface number | * +-----+-----------------+------+--------------------------+ * | 1 | dwLength | U32 | length of the descriptor | * +-----+-----------------+------+--------------------------+ @@ -224,7 +224,7 @@ struct usb_ffs_dmabuf_transfer_req { * +-----+-----------------+------+--------------------------+ * | off | name | type | description | * +-----+-----------------+------+--------------------------+ - * | 0 | inteface | U8 | related interface number | + * | 0 | interface | U8 | related interface number | * +-----+-----------------+------+--------------------------+ * | 1 | dwLength | U32 | length of the descriptor | * +-----+-----------------+------+--------------------------+ @@ -237,7 +237,7 @@ struct usb_ffs_dmabuf_transfer_req { * | 11 | ExtProp[] | | list of ext. prop. d. | * +-----+-----------------+------+--------------------------+ * - * ExtCompat[] is an array of valid Extended Compatiblity descriptors + * ExtCompat[] is an array of valid Extended Compatibility descriptors * which have the following format: * * +-----+-----------------------+------+-------------------------------------+ @@ -295,7 +295,7 @@ struct usb_functionfs_strings_head { * | 16 | stringtab | StringTab[lang_count] | table of strings per lang | * * For each language there is one stringtab entry (ie. there are lang_count - * stringtab entires). Each StringTab has following format: + * stringtab entries). 
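Aside: the taskstats hunk above pairs every *_delay_total counter with new *_delay_max and *_delay_min extremes (v15 of the struct). A minimal consumer-side sketch, assuming `ts` was already filled in from the taskstats genetlink interface and that the installed uapi headers are new enough to carry the v15 fields:

/* Sketch: reading the new v15 delay extremes next to the existing
 * totals. Assumes ts came from the taskstats genetlink interface;
 * only the arithmetic on the new fields is shown. */
#include <stdio.h>
#include <linux/taskstats.h>

static void print_cpu_delays(const struct taskstats *ts)
{
	unsigned long long avg =
		ts->cpu_count ? ts->cpu_delay_total / ts->cpu_count : 0;

	printf("cpu delay (ns): avg=%llu max=%llu min=%llu\n",
	       avg,
	       (unsigned long long)ts->cpu_delay_max,
	       (unsigned long long)ts->cpu_delay_min);
}

The same max/min pair is added for the blkio, swapin, freepages, thrashing, compact, wpcopy, and IRQ categories, so the same pattern applies per category.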
Each StringTab has following format: * * | off | name | type | description | * |-----+---------+-------------------+------------------------------------| diff --git a/include/uapi/linux/vduse.h b/include/uapi/linux/vduse.h index 11bd48c72c6c..68a627d04afa 100644 --- a/include/uapi/linux/vduse.h +++ b/include/uapi/linux/vduse.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ #ifndef _UAPI_VDUSE_H_ #define _UAPI_VDUSE_H_ diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 1beb317df1b9..8549d4571257 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -116,6 +116,8 @@ #define VIRTIO_PCI_CAP_PCI_CFG 5 /* Additional shared memory capability */ #define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8 +/* PCI vendor data configuration */ +#define VIRTIO_PCI_CAP_VENDOR_CFG 9 /* This is the PCI capability header: */ struct virtio_pci_cap { @@ -130,6 +132,18 @@ struct virtio_pci_cap { __le32 length; /* Length of the structure, in bytes. */ }; +/* This is the PCI vendor data capability header: */ +struct virtio_pci_vndr_data { + __u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */ + __u8 cap_next; /* Generic PCI field: next ptr. */ + __u8 cap_len; /* Generic PCI field: capability length */ + __u8 cfg_type; /* Identifies the structure. */ + __u16 vendor_id; /* Identifies the vendor-specific format. */ + /* For Vendor Definition */ + /* Pads structure to a multiple of 4 bytes */ + /* Reads must not have side effects */ +}; + struct virtio_pci_cap64 { struct virtio_pci_cap cap; __le32 offset_hi; /* Most sig 32 bits of offset */ diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index d73a97e3030a..a23495c0e0a1 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -158,7 +158,8 @@ enum { #define XFRM_MODE_ROUTEOPTIMIZATION 2 #define XFRM_MODE_IN_TRIGGER 3 #define XFRM_MODE_BEET 4 -#define XFRM_MODE_MAX 5 +#define XFRM_MODE_IPTFS 5 +#define XFRM_MODE_MAX 6 /* Netlink configuration messages. 
*/ enum { @@ -323,6 +324,12 @@ enum xfrm_attr_type_t { XFRMA_SA_DIR, /* __u8 */ XFRMA_NAT_KEEPALIVE_INTERVAL, /* __u32 in seconds for NAT keepalive */ XFRMA_SA_PCPU, /* __u32 */ + XFRMA_IPTFS_DROP_TIME, /* __u32 in: usec to wait for next seq */ + XFRMA_IPTFS_REORDER_WINDOW, /* __u16 in: reorder window size (pkts) */ + XFRMA_IPTFS_DONT_FRAG, /* out: don't use fragmentation */ + XFRMA_IPTFS_INIT_DELAY, /* __u32 out: initial packet wait delay (usec) */ + XFRMA_IPTFS_MAX_QSIZE, /* __u32 out: max ingress queue size (octets) */ + XFRMA_IPTFS_PKT_SIZE, /* __u32 out: size of outer packet, 0 for PMTU */ __XFRMA_MAX #define XFRMA_OUTPUT_MARK XFRMA_SET_MARK /* Compatibility */ diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h index bc30c1f2a109..a5c41f771e05 100644 --- a/include/uapi/sound/asequencer.h +++ b/include/uapi/sound/asequencer.h @@ -10,7 +10,7 @@ #include <sound/asound.h> /** version of the sequencer */ -#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION(1, 0, 4) +#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION(1, 0, 5) /** * definition of sequencer event types @@ -92,6 +92,9 @@ #define SNDRV_SEQ_EVENT_PORT_SUBSCRIBED 66 /* ports connected */ #define SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED 67 /* ports disconnected */ +#define SNDRV_SEQ_EVENT_UMP_EP_CHANGE 68 /* UMP EP info has changed */ +#define SNDRV_SEQ_EVENT_UMP_BLOCK_CHANGE 69 /* UMP block info has changed */ + /* 70-89: synthesizer events - obsoleted */ /** user-defined events with fixed length @@ -253,6 +256,12 @@ struct snd_seq_ev_quote { struct snd_seq_event *event; /* quoted event */ } __packed; + /* UMP info change notify */ +struct snd_seq_ev_ump_notify { + unsigned char client; /**< Client number */ + unsigned char block; /**< Block number (optional) */ +}; + union snd_seq_event_data { /* event data... */ struct snd_seq_ev_note note; struct snd_seq_ev_ctrl control; @@ -265,6 +274,7 @@ union snd_seq_event_data { /* event data... */ struct snd_seq_connect connect; struct snd_seq_result result; struct snd_seq_ev_quote quote; + struct snd_seq_ev_ump_notify ump_notify; }; /* sequencer event */ diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index 4cd513215bcd..5a049eeaecce 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -716,7 +716,7 @@ enum { * Raw MIDI section - /dev/snd/midi?? 
*/ -#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 4) +#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 5) enum { SNDRV_RAWMIDI_STREAM_OUTPUT = 0, @@ -728,6 +728,9 @@ enum { #define SNDRV_RAWMIDI_INFO_INPUT 0x00000002 #define SNDRV_RAWMIDI_INFO_DUPLEX 0x00000004 #define SNDRV_RAWMIDI_INFO_UMP 0x00000008 +#define SNDRV_RAWMIDI_INFO_STREAM_INACTIVE 0x00000010 + +#define SNDRV_RAWMIDI_DEVICE_UNKNOWN 0 struct snd_rawmidi_info { unsigned int device; /* RO/WR (control): device number */ @@ -740,7 +743,8 @@ struct snd_rawmidi_info { unsigned char subname[32]; /* name of active or selected subdevice */ unsigned int subdevices_count; unsigned int subdevices_avail; - unsigned char reserved[64]; /* reserved for future use */ + int tied_device; /* R: tied rawmidi device (UMP/legacy) */ + unsigned char reserved[60]; /* reserved for future use */ }; #define SNDRV_RAWMIDI_MODE_FRAMING_MASK (7<<0) diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h index ddc77322d571..bc7648a30746 100644 --- a/include/uapi/sound/compress_params.h +++ b/include/uapi/sound/compress_params.h @@ -334,6 +334,14 @@ union snd_codec_options { struct snd_dec_wma wma_d; struct snd_dec_alac alac_d; struct snd_dec_ape ape_d; + struct { + __u32 out_sample_rate; + } src_d; +} __attribute__((packed, aligned(4))); + +struct snd_codec_desc_src { + __u32 out_sample_rate_min; + __u32 out_sample_rate_max; } __attribute__((packed, aligned(4))); /** struct snd_codec_desc - description of codec capabilities @@ -347,6 +355,9 @@ union snd_codec_options { * @modes: Supported modes. See SND_AUDIOMODE defines * @formats: Supported formats. See SND_AUDIOSTREAMFORMAT defines * @min_buffer: Minimum buffer size handled by codec implementation + * @pcm_formats: Output (for decoders) or input (for encoders) + * PCM formats (required to accel mode, 0 for other modes) + * @u_space: union space (for codec dependent data) * @reserved: reserved for future use * * This structure provides a scalar value for profiles, modes and stream @@ -370,7 +381,12 @@ struct snd_codec_desc { __u32 modes; __u32 formats; __u32 min_buffer; - __u32 reserved[15]; + __u32 pcm_formats; + union { + __u32 u_space[6]; + struct snd_codec_desc_src src; + } __attribute__((packed, aligned(4))); + __u32 reserved[8]; } __attribute__((packed, aligned(4))); /** struct snd_codec @@ -395,6 +411,8 @@ struct snd_codec_desc { * @align: Block alignment in bytes of an audio sample. * Only required for PCM or IEC formats. * @options: encoder-specific settings + * @pcm_format: Output (for decoders) or input (for encoders) + * PCM formats (required to accel mode, 0 for other modes) * @reserved: reserved for future use */ @@ -411,7 +429,8 @@ struct snd_codec { __u32 format; __u32 align; union snd_codec_options options; - __u32 reserved[3]; + __u32 pcm_format; + __u32 reserved[2]; } __attribute__((packed, aligned(4))); #endif diff --git a/include/uapi/sound/fcp.h b/include/uapi/sound/fcp.h new file mode 100644 index 000000000000..aea428956d8f --- /dev/null +++ b/include/uapi/sound/fcp.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Focusrite Control Protocol Driver for ALSA + * + * Copyright (c) 2024-2025 by Geoffrey D. Bennett <g at b4.vu> + */ +/* + * DOC: FCP (Focusrite Control Protocol) User-Space API + * + * This header defines the interface between the FCP kernel driver and + * user-space programs to enable the use of the proprietary features + * available in Focusrite USB audio interfaces. 
This includes Scarlett + * 2nd Gen, 3rd Gen, 4th Gen, Clarett USB, Clarett+, and Vocaster + * series devices. + * + * The interface is provided via ALSA's hwdep interface. Opening the + * hwdep device requires CAP_SYS_RAWIO privileges as this interface + * provides near-direct access. + * + * For details on the FCP protocol, refer to the kernel scarlett2 + * driver in sound/usb/mixer_scarlett2.c and the fcp-support project + * at https://github.com/geoffreybennett/fcp-support + * + * For examples of using these IOCTLs, see the fcp-server source in + * the fcp-support project. + * + * IOCTL Interface + * -------------- + * FCP_IOCTL_PVERSION: + * Returns the protocol version supported by the driver. + * + * FCP_IOCTL_INIT: + * Initialises the protocol and synchronises sequence numbers + * between the driver and device. Must be called at least once + * before sending commands. Can be safely called again at any time. + * + * FCP_IOCTL_CMD: + * Sends an FCP command to the device and returns the response. + * Requires prior initialisation via FCP_IOCTL_INIT. + * + * FCP_IOCTL_SET_METER_MAP: + * Configures the Level Meter control's mapping between device + * meters and control channels. Requires FCP_IOCTL_INIT to have been + * called first. The map size and number of slots cannot be changed + * after initial configuration, although the map itself can be + * updated. Once configured, the Level Meter remains functional even + * after the hwdep device is closed. + * + * FCP_IOCTL_SET_METER_LABELS: + * Set the labels for the Level Meter control. Requires + * FCP_IOCTL_SET_METER_MAP to have been called first. labels[] + * should contain a sequence of null-terminated labels corresponding + * to the control's channels. + */ +#ifndef __UAPI_SOUND_FCP_H +#define __UAPI_SOUND_FCP_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +#define FCP_HWDEP_MAJOR 2 +#define FCP_HWDEP_MINOR 0 +#define FCP_HWDEP_SUBMINOR 0 + +#define FCP_HWDEP_VERSION \ + ((FCP_HWDEP_MAJOR << 16) | \ + (FCP_HWDEP_MINOR << 8) | \ + FCP_HWDEP_SUBMINOR) + +#define FCP_HWDEP_VERSION_MAJOR(v) (((v) >> 16) & 0xFF) +#define FCP_HWDEP_VERSION_MINOR(v) (((v) >> 8) & 0xFF) +#define FCP_HWDEP_VERSION_SUBMINOR(v) ((v) & 0xFF) + +/* Get protocol version */ +#define FCP_IOCTL_PVERSION _IOR('S', 0x60, int) + +/* Start the protocol */ + +/* Step 0 and step 2 responses are variable length and placed in + * resp[] one after the other. + */ +struct fcp_init { + __u16 step0_resp_size; + __u16 step2_resp_size; + __u32 init1_opcode; + __u32 init2_opcode; + __u8 resp[]; +} __attribute__((packed)); + +#define FCP_IOCTL_INIT _IOWR('S', 0x64, struct fcp_init) + +/* Perform a command */ + +/* The request data is placed in data[] and the response data will + * overwrite it. 
+ */ +struct fcp_cmd { + __u32 opcode; + __u16 req_size; + __u16 resp_size; + __u8 data[]; +} __attribute__((packed)); +#define FCP_IOCTL_CMD _IOWR('S', 0x65, struct fcp_cmd) + +/* Set the meter map */ +struct fcp_meter_map { + __u16 map_size; + __u16 meter_slots; + __s16 map[]; +} __attribute__((packed)); +#define FCP_IOCTL_SET_METER_MAP _IOW('S', 0x66, struct fcp_meter_map) + +/* Set the meter labels */ +struct fcp_meter_labels { + __u16 labels_size; + char labels[]; +} __attribute__((packed)); +#define FCP_IOCTL_SET_METER_LABELS _IOW('S', 0x67, struct fcp_meter_labels) + +#endif /* __UAPI_SOUND_FCP_H */ diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h index 0a246bc218d3..c28c766270de 100644 --- a/include/uapi/sound/sof/tokens.h +++ b/include/uapi/sound/sof/tokens.h @@ -153,6 +153,8 @@ /* Stream */ #define SOF_TKN_STREAM_PLAYBACK_COMPATIBLE_D0I3 1200 #define SOF_TKN_STREAM_CAPTURE_COMPATIBLE_D0I3 1201 +#define SOF_TKN_STREAM_PLAYBACK_PAUSE_SUPPORTED 1202 +#define SOF_TKN_STREAM_CAPTURE_PAUSE_SUPPORTED 1203 /* Led control for mute switches */ #define SOF_TKN_MUTE_LED_USE 1300 diff --git a/include/uapi/sound/tlv.h b/include/uapi/sound/tlv.h index b99a2414b53d..5bb55c80e095 100644 --- a/include/uapi/sound/tlv.h +++ b/include/uapi/sound/tlv.h @@ -18,6 +18,8 @@ #define SNDRV_CTL_TLVT_CHMAP_VAR 0x102 /* channels freely swappable */ #define SNDRV_CTL_TLVT_CHMAP_PAIRED 0x103 /* pair-wise swappable */ +#define SNDRV_CTL_TLVT_FCP_CHANNEL_LABELS 0x110 /* channel labels */ + /* * TLV structure is right behind the struct snd_ctl_tlv: * unsigned int type - see SNDRV_CTL_TLVT_* diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h index e594abe5d05f..89672ad8c3bb 100644 --- a/include/ufs/ufs.h +++ b/include/ufs/ufs.h @@ -180,7 +180,6 @@ enum attr_idn { QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D, QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E, QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F, - QUERY_ATTR_IDN_EXT_IID_EN = 0x2A, QUERY_ATTR_IDN_TIMESTAMP = 0x30 }; @@ -391,7 +390,6 @@ enum { UFS_DEV_EXT_TEMP_NOTIF = BIT(6), UFS_DEV_HPB_SUPPORT = BIT(7), UFS_DEV_WRITE_BOOSTER_SUP = BIT(8), - UFS_DEV_EXT_IID_SUP = BIT(16), }; #define UFS_DEV_HPB_SUPPORT_VERSION 0x310 @@ -585,9 +583,6 @@ struct ufs_dev_info { bool b_advanced_rpmb_en; - /* UFS EXT_IID Enable */ - bool b_ext_iid_en; - /* UFS RTC */ enum ufs_rtc_time rtc_type; time64_t rtc_time_baseline; diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 74e5b9960c54..650ff238cd74 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -326,7 +326,6 @@ struct ufs_pwr_mode_info { * @phy_initialization: used to initialize phys * @device_reset: called to issue a reset pulse on the UFS device * @config_scaling_param: called to configure clock scaling parameters - * @program_key: program or evict an inline encryption key * @fill_crypto_prdt: initialize crypto-related fields in the PRDT * @event_notify: called to notify important events * @mcq_config_resource: called to configure MCQ platform resources @@ -373,8 +372,6 @@ struct ufs_hba_variant_ops { void (*config_scaling_param)(struct ufs_hba *hba, struct devfreq_dev_profile *profile, struct devfreq_simple_ondemand_data *data); - int (*program_key)(struct ufs_hba *hba, - const union ufs_crypto_cfg_entry *cfg, int slot); int (*fill_crypto_prdt)(struct ufs_hba *hba, const struct bio_crypt_ctx *crypt_ctx, void *prdt, unsigned int num_segments); @@ -403,6 +400,9 @@ enum clk_gating_state { * delay_ms * @ungate_work: worker to turn on clocks that will be used in case of * interrupt context 
+ * @clk_gating_workq: workqueue for clock gating work. + * @lock: serialize access to some struct ufs_clk_gating members. An outer lock + * relative to the host lock * @state: the current clocks state * @delay_ms: gating delay in ms * @is_suspended: clk gating is suspended when set to 1 which can be used @@ -413,11 +413,14 @@ enum clk_gating_state { * @is_initialized: Indicates whether clock gating is initialized or not * @active_reqs: number of requests that are pending and should be waited for * completion before gating clocks. - * @clk_gating_workq: workqueue for clock gating work. */ struct ufs_clk_gating { struct delayed_work gate_work; struct work_struct ungate_work; + struct workqueue_struct *clk_gating_workq; + + spinlock_t lock; + enum clk_gating_state state; unsigned long delay_ms; bool is_suspended; @@ -426,11 +429,14 @@ struct ufs_clk_gating { bool is_enabled; bool is_initialized; int active_reqs; - struct workqueue_struct *clk_gating_workq; }; /** * struct ufs_clk_scaling - UFS clock scaling related data + * @workq: workqueue to schedule devfreq suspend/resume work + * @suspend_work: worker to suspend devfreq + * @resume_work: worker to resume devfreq + * @lock: serialize access to some struct ufs_clk_scaling members * @active_reqs: number of requests that are pending. If this is zero when * devfreq ->target() function is called then schedule "suspend_work" to * suspend devfreq. @@ -440,9 +446,6 @@ struct ufs_clk_gating { * @enable_attr: sysfs attribute to enable/disable clock scaling * @saved_pwr_info: UFS power mode may also be changed during scaling and this * one keeps track of previous power mode. - * @workq: workqueue to schedule devfreq suspend/resume work - * @suspend_work: worker to suspend devfreq - * @resume_work: worker to resume devfreq * @target_freq: frequency requested by devfreq framework * @min_gear: lowest HS gear to scale down to * @is_enabled: tracks if scaling is currently enabled or not, controlled by @@ -454,15 +457,18 @@ struct ufs_clk_gating { * @is_suspended: tracks if devfreq is suspended or not */ struct ufs_clk_scaling { + struct workqueue_struct *workq; + struct work_struct suspend_work; + struct work_struct resume_work; + + spinlock_t lock; + int active_reqs; unsigned long tot_busy_t; ktime_t window_start_t; ktime_t busy_start_t; struct device_attribute enable_attr; struct ufs_pa_layer_attr saved_pwr_info; - struct workqueue_struct *workq; - struct work_struct suspend_work; - struct work_struct resume_work; unsigned long target_freq; u32 min_gear; bool is_enabled; @@ -946,7 +952,6 @@ enum ufshcd_mcq_opr { * @nr_queues: number of Queues of different queue types * @complete_put: whether or not to call ufshcd_rpm_put() from inside * ufshcd_resume_complete() - * @ext_iid_sup: is EXT_IID is supported by UFSHC * @mcq_sup: is mcq supported by UFSHC * @mcq_enabled: is mcq ready to accept requests * @res: array of resource info of MCQ registers @@ -1112,7 +1117,6 @@ struct ufs_hba { unsigned int nr_hw_queues; unsigned int nr_queues[HCTX_MAX_TYPES]; bool complete_put; - bool ext_iid_sup; bool scsi_host_added; bool mcq_sup; bool lsdb_sup; @@ -1202,6 +1206,14 @@ static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba) ({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); }) #endif +#ifdef CONFIG_SCSI_UFS_CRYPTO +static inline struct ufs_hba * +ufs_hba_from_crypto_profile(struct blk_crypto_profile *profile) +{ + return container_of(profile, struct ufs_hba, crypto_profile); +} +#endif + static inline size_t 
ufshcd_get_ucd_size(const struct ufs_hba *hba) { return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba); diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h index 27364c4a6ef9..612500a7088f 100644 --- a/include/ufs/ufshci.h +++ b/include/ufs/ufshci.h @@ -82,11 +82,6 @@ enum { MASK_MCQ_SUPPORT = 0x40000000, }; -/* MCQ capability mask */ -enum { - MASK_EXT_IID_SUPPORT = 0x00000400, -}; - enum { REG_SQATTR = 0x0, REG_SQLBA = 0x4, diff --git a/include/vdso/page.h b/include/vdso/page.h index 710ae2414e68..bc47186c07fc 100644 --- a/include/vdso/page.h +++ b/include/vdso/page.h @@ -8,7 +8,7 @@ * PAGE_SHIFT determines the page size. * * Note: This definition is required because PAGE_SHIFT is used - * in several places throuout the codebase. + * in several places throughout the codebase. */ #define PAGE_SHIFT CONFIG_PAGE_SHIFT |
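Closing aside, looking back at the ntsync.h rework earlier in this diff: the create ioctls no longer return the object through an args field but as the ioctl return value (hence _IOWR becoming _IOW), and the wait ioctls take a u64-encoded pointer to an array of object fds. A minimal user-space sketch under those assumptions; the /dev/ntsync path is the driver's character device, the all-ones timeout is assumed to mean "no timeout", and error handling is abbreviated:

/* Sketch: the reworked ntsync user-space flow. The object fd is the
 * return value of the create ioctl; NTSYNC_IOC_WAIT_ANY takes a
 * u64-encoded pointer to an array of object fds. Assumes headers
 * carrying the definitions from the hunk above. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ntsync.h>

int main(void)
{
	int dev = open("/dev/ntsync", O_RDWR);
	if (dev < 0)
		return 1;

	struct ntsync_sem_args sem_args = { .count = 0, .max = 2 };
	int sem = ioctl(dev, NTSYNC_IOC_CREATE_SEM, &sem_args);
	if (sem < 0)
		return 1;

	__u32 count = 1;	/* release one unit; old count comes back */
	ioctl(sem, NTSYNC_IOC_SEM_RELEASE, &count);

	__u64 objs[] = { (__u64)sem };
	struct ntsync_wait_args wait = {
		.timeout = ~0ULL,	/* assumed: all-ones == no timeout */
		.objs = (uintptr_t)objs,
		.count = 1,
	};
	if (ioctl(dev, NTSYNC_IOC_WAIT_ANY, &wait) == 0)
		printf("object %u signaled\n", wait.index);

	close(sem);
	close(dev);
	return 0;
}

NTSYNC_MAX_WAIT_COUNT bounds the objs array at 64 entries, and NTSYNC_IOC_WAIT_ALL follows the same calling convention but blocks until every listed object is signaled.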