Diffstat (limited to 'drivers/platform/x86/intel')
30 files changed, 818 insertions, 221 deletions
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 10cd65497cc1..445e7a59beb4 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/string_choices.h> #include <linux/suspend.h> #include "../dual_accel_detect.h" @@ -331,10 +332,8 @@ static int intel_hid_set_enable(struct device *device, bool enable) acpi_handle handle = ACPI_HANDLE(device); /* Enable|disable features - power button is always enabled */ - if (!intel_hid_execute_method(handle, INTEL_HID_DSM_HDSM_FN, - enable)) { - dev_warn(device, "failed to %sable hotkeys\n", - enable ? "en" : "dis"); + if (!intel_hid_execute_method(handle, INTEL_HID_DSM_HDSM_FN, enable)) { + dev_warn(device, "failed to %s hotkeys\n", str_enable_disable(enable)); return -EIO; } diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 33412a584836..bc252b883210 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -32,6 +32,7 @@ bool *ifs_pkg_auth; static const struct ifs_test_caps scan_test = { .integrity_cap_bit = MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT, .test_num = IFS_TYPE_SAF, + .image_suffix = "scan", }; static const struct ifs_test_caps array_test = { @@ -39,9 +40,32 @@ static const struct ifs_test_caps array_test = { .test_num = IFS_TYPE_ARRAY_BIST, }; +static const struct ifs_test_msrs scan_msrs = { + .copy_hashes = MSR_COPY_SCAN_HASHES, + .copy_hashes_status = MSR_SCAN_HASHES_STATUS, + .copy_chunks = MSR_AUTHENTICATE_AND_COPY_CHUNK, + .copy_chunks_status = MSR_CHUNKS_AUTHENTICATION_STATUS, + .test_ctrl = MSR_SAF_CTRL, +}; + +static const struct ifs_test_msrs sbaf_msrs = { + .copy_hashes = MSR_COPY_SBAF_HASHES, + .copy_hashes_status = MSR_SBAF_HASHES_STATUS, + .copy_chunks = MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK, + .copy_chunks_status = MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS, + .test_ctrl = MSR_SBAF_CTRL, +}; + +static const struct ifs_test_caps sbaf_test = { + .integrity_cap_bit = MSR_INTEGRITY_CAPS_SBAF_BIT, + .test_num = IFS_TYPE_SBAF, + .image_suffix = "sbft", +}; + static struct ifs_device ifs_devices[] = { [IFS_TYPE_SAF] = { .test_caps = &scan_test, + .test_msrs = &scan_msrs, .misc = { .name = "intel_ifs_0", .minor = MISC_DYNAMIC_MINOR, @@ -56,6 +80,15 @@ static struct ifs_device ifs_devices[] = { .groups = plat_ifs_array_groups, }, }, + [IFS_TYPE_SBAF] = { + .test_caps = &sbaf_test, + .test_msrs = &sbaf_msrs, + .misc = { + .name = "intel_ifs_2", + .minor = MISC_DYNAMIC_MINOR, + .groups = plat_ifs_groups, + }, + }, }; #define IFS_NUMTESTS ARRAY_SIZE(ifs_devices) diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index 56b9f3e3cf76..5c3c0dfa1bf8 100644 --- a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -126,11 +126,40 @@ * The driver does not make use of this, it only tests one core at a time. * * .. [#f1] https://github.com/intel/TBD + * + * + * Structural Based Functional Test at Field (SBAF): + * ------------------------------------------------- + * + * SBAF is a new type of testing that provides comprehensive core test + * coverage complementing Scan at Field (SAF) testing. SBAF mimics the + * manufacturing screening environment and leverages the same test suite. + * It makes use of Design For Test (DFT) observation sites and features + * to maximize coverage in minimum time. 
+ * + * Similar to the SAF test, SBAF isolates the core under test from the + * rest of the system during execution. Upon completion, the core + * seamlessly resets to its pre-test state and resumes normal operation. + * Any machine checks or hangs encountered during the test are confined to + * the isolated core, preventing disruption to the overall system. + * + * Like the SAF test, the SBAF test is also divided into multiple batches, + * and each batch test can take hundreds of milliseconds (100-200 ms) to + * complete. If such a lengthy interruption is undesirable, it is + * recommended to relocate the time-sensitive applications to other cores. */ #include <linux/device.h> #include <linux/miscdevice.h> #define MSR_ARRAY_BIST 0x00000105 + +#define MSR_COPY_SBAF_HASHES 0x000002b8 +#define MSR_SBAF_HASHES_STATUS 0x000002b9 +#define MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK 0x000002ba +#define MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS 0x000002bb +#define MSR_ACTIVATE_SBAF 0x000002bc +#define MSR_SBAF_STATUS 0x000002bd + #define MSR_COPY_SCAN_HASHES 0x000002c2 #define MSR_SCAN_HASHES_STATUS 0x000002c3 #define MSR_AUTHENTICATE_AND_COPY_CHUNK 0x000002c4 @@ -140,6 +169,7 @@ #define MSR_ARRAY_TRIGGER 0x000002d6 #define MSR_ARRAY_STATUS 0x000002d7 #define MSR_SAF_CTRL 0x000004f0 +#define MSR_SBAF_CTRL 0x000004f8 #define SCAN_NOT_TESTED 0 #define SCAN_TEST_PASS 1 @@ -147,6 +177,7 @@ #define IFS_TYPE_SAF 0 #define IFS_TYPE_ARRAY_BIST 1 +#define IFS_TYPE_SBAF 2 #define ARRAY_GEN0 0 #define ARRAY_GEN1 1 @@ -196,7 +227,8 @@ union ifs_chunks_auth_status_gen2 { u16 valid_chunks; u16 total_chunks; u32 error_code :8; - u32 rsvd2 :24; + u32 rsvd2 :8; + u32 max_bundle :16; }; }; @@ -253,6 +285,34 @@ union ifs_array { }; }; +/* MSR_ACTIVATE_SBAF bit fields */ +union ifs_sbaf { + u64 data; + struct { + u32 bundle_idx :9; + u32 rsvd1 :5; + u32 pgm_idx :2; + u32 rsvd2 :16; + u32 delay :31; + u32 sigmce :1; + }; +}; + +/* MSR_SBAF_STATUS bit fields */ +union ifs_sbaf_status { + u64 data; + struct { + u32 bundle_idx :9; + u32 rsvd1 :5; + u32 pgm_idx :2; + u32 rsvd2 :16; + u32 error_code :8; + u32 rsvd3 :21; + u32 test_fail :1; + u32 sbaf_status :2; + }; +}; + /* * Driver populated error-codes * 0xFD: Test timed out before completing all the chunks. 
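The new MSR_ACTIVATE_SBAF layout is a plain bitfield union, so the value the driver hands to wrmsrl() is just bundle_idx, pgm_idx and delay packed into a single 64-bit word. Below is a minimal user-space sketch of that packing; the field widths are copied from the union ifs_sbaf hunk above, while the specific bundle, program and delay numbers are made-up illustration values rather than anything required by the hardware.

#include <stdint.h>
#include <stdio.h>

/* Field widths copied from union ifs_sbaf in ifs.h above. */
union ifs_sbaf {
        uint64_t data;
        struct {
                uint32_t bundle_idx :9;
                uint32_t rsvd1      :5;
                uint32_t pgm_idx    :2;
                uint32_t rsvd2      :16;
                uint32_t delay      :31;
                uint32_t sigmce     :1;
        };
};

int main(void)
{
        union ifs_sbaf activate = { .data = 0 };

        activate.bundle_idx = 3;   /* next bundle to execute (illustrative) */
        activate.pgm_idx = 0;      /* first test program in that bundle */
        activate.delay = 100000;   /* cycles to wait for the sibling thread (illustrative) */

        /* The value that dosbaf() ultimately writes with wrmsrl(MSR_ACTIVATE_SBAF, ...). */
        printf("MSR_ACTIVATE_SBAF value: %#018llx\n",
               (unsigned long long)activate.data);
        return 0;
}

The same union-over-u64 pattern is used in the other direction when the driver decodes MSR_SBAF_STATUS after the test run.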
@@ -261,9 +321,28 @@ union ifs_array { #define IFS_SW_TIMEOUT 0xFD #define IFS_SW_PARTIAL_COMPLETION 0xFE +#define IFS_SUFFIX_SZ 5 + struct ifs_test_caps { int integrity_cap_bit; int test_num; + char image_suffix[IFS_SUFFIX_SZ]; +}; + +/** + * struct ifs_test_msrs - MSRs used in IFS tests + * @copy_hashes: Copy test hash data + * @copy_hashes_status: Status of copied test hash data + * @copy_chunks: Copy chunks of the test data + * @copy_chunks_status: Status of the copied test data chunks + * @test_ctrl: Control the test attributes + */ +struct ifs_test_msrs { + u32 copy_hashes; + u32 copy_hashes_status; + u32 copy_chunks; + u32 copy_chunks_status; + u32 test_ctrl; }; /** @@ -278,6 +357,7 @@ struct ifs_test_caps { * @generation: IFS test generation enumerated by hardware * @chunk_size: size of a test chunk * @array_gen: test generation of array test + * @max_bundle: maximum bundle index */ struct ifs_data { int loaded_version; @@ -290,6 +370,7 @@ struct ifs_data { u32 generation; u32 chunk_size; u32 array_gen; + u32 max_bundle; }; struct ifs_work { @@ -299,6 +380,7 @@ struct ifs_work { struct ifs_device { const struct ifs_test_caps *test_caps; + const struct ifs_test_msrs *test_msrs; struct ifs_data rw_data; struct miscdevice misc; }; @@ -319,6 +401,14 @@ static inline const struct ifs_test_caps *ifs_get_test_caps(struct device *dev) return d->test_caps; } +static inline const struct ifs_test_msrs *ifs_get_test_msrs(struct device *dev) +{ + struct miscdevice *m = dev_get_drvdata(dev); + struct ifs_device *d = container_of(m, struct ifs_device, misc); + + return d->test_msrs; +} + extern bool *ifs_pkg_auth; int ifs_load_firmware(struct device *dev); int do_core_test(int cpu, struct device *dev); diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 39f19cb51749..de54bd1a5970 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -118,15 +118,17 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) union ifs_scan_hashes_status hashes_status; union ifs_chunks_auth_status chunk_status; struct device *dev = local_work->dev; + const struct ifs_test_msrs *msrs; int i, num_chunks, chunk_size; struct ifs_data *ifsd; u64 linear_addr, base; u32 err_code; ifsd = ifs_get_data(dev); + msrs = ifs_get_test_msrs(dev); /* run scan hash copy */ - wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr); - rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data); + wrmsrl(msrs->copy_hashes, ifs_hash_ptr); + rdmsrl(msrs->copy_hashes_status, hashes_status.data); /* enumerate the scan image information */ num_chunks = hashes_status.num_chunks; @@ -147,8 +149,8 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) linear_addr = base + i * chunk_size; linear_addr |= i; - wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, linear_addr); - rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + wrmsrl(msrs->copy_chunks, linear_addr); + rdmsrl(msrs->copy_chunks_status, chunk_status.data); ifsd->valid_chunks = chunk_status.valid_chunks; err_code = chunk_status.error_code; @@ -180,6 +182,7 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) union ifs_scan_hashes_status_gen2 hashes_status; union ifs_chunks_auth_status_gen2 chunk_status; u32 err_code, valid_chunks, total_chunks; + const struct ifs_test_msrs *msrs; int i, num_chunks, chunk_size; union meta_data *ifs_meta; int starting_chunk_nr; @@ -189,10 +192,11 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) int retry_count; ifsd 
= ifs_get_data(dev); + msrs = ifs_get_test_msrs(dev); if (need_copy_scan_hashes(ifsd)) { - wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr); - rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data); + wrmsrl(msrs->copy_hashes, ifs_hash_ptr); + rdmsrl(msrs->copy_hashes_status, hashes_status.data); /* enumerate the scan image information */ chunk_size = hashes_status.chunk_size * SZ_1K; @@ -212,8 +216,8 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) } if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) { - wrmsrl(MSR_SAF_CTRL, INVALIDATE_STRIDE); - rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + wrmsrl(msrs->test_ctrl, INVALIDATE_STRIDE); + rdmsrl(msrs->copy_chunks_status, chunk_status.data); if (chunk_status.valid_chunks != 0) { dev_err(dev, "Couldn't invalidate installed stride - %d\n", chunk_status.valid_chunks); @@ -234,9 +238,9 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) chunk_table[1] = linear_addr; do { local_irq_disable(); - wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, (u64)chunk_table); + wrmsrl(msrs->copy_chunks, (u64)chunk_table); local_irq_enable(); - rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + rdmsrl(msrs->copy_chunks_status, chunk_status.data); err_code = chunk_status.error_code; } while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count); @@ -257,20 +261,22 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) return -EIO; } ifsd->valid_chunks = valid_chunks; + ifsd->max_bundle = chunk_status.max_bundle; return 0; } static int validate_ifs_metadata(struct device *dev) { + const struct ifs_test_caps *test = ifs_get_test_caps(dev); struct ifs_data *ifsd = ifs_get_data(dev); union meta_data *ifs_meta; char test_file[64]; int ret = -EINVAL; - snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.scan", + snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.%s", boot_cpu_data.x86, boot_cpu_data.x86_model, - boot_cpu_data.x86_stepping, ifsd->cur_batch); + boot_cpu_data.x86_stepping, ifsd->cur_batch, test->image_suffix); ifs_meta = (union meta_data *)find_meta_data(ifs_header_ptr, META_TYPE_IFS); if (!ifs_meta) { @@ -300,6 +306,12 @@ static int validate_ifs_metadata(struct device *dev) return ret; } + if (ifs_meta->test_type != test->test_num) { + dev_warn(dev, "Metadata test_type %d mismatches with device type\n", + ifs_meta->test_type); + return ret; + } + return 0; } @@ -387,9 +399,9 @@ int ifs_load_firmware(struct device *dev) char scan_path[64]; int ret; - snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.scan", + snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.%s", test->test_num, boot_cpu_data.x86, boot_cpu_data.x86_model, - boot_cpu_data.x86_stepping, ifsd->cur_batch); + boot_cpu_data.x86_stepping, ifsd->cur_batch, test->image_suffix); ret = request_firmware_direct(&fw, scan_path, dev); if (ret) { diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index be3d51ed0e47..f978dd05d4d8 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -29,6 +29,13 @@ struct run_params { union ifs_status status; }; +struct sbaf_run_params { + struct ifs_data *ifsd; + int *retry_cnt; + union ifs_sbaf *activate; + union ifs_sbaf_status status; +}; + /* * Number of TSC cycles that a logical CPU will wait for the other * logical CPU on the core in the WRMSR(ACTIVATE_SCAN). 
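With image_suffix added to struct ifs_test_caps, ifs_load_firmware() builds the firmware path from one format string for every IFS device and only the extension differs. The sketch below reproduces that snprintf() pattern; the test_num/suffix pairs come from the scan_test and sbaf_test tables earlier in this patch, while the family/model/stepping/batch values are purely hypothetical.

#include <stdio.h>

/* test_num and image_suffix pairs from the scan_test/sbaf_test capability tables. */
static const struct {
        int test_num;
        const char *suffix;
} tests[] = {
        { 0, "scan" },  /* IFS_TYPE_SAF  */
        { 2, "sbft" },  /* IFS_TYPE_SBAF */
};

int main(void)
{
        /* Hypothetical CPU identification: family 6, model 0xaf, stepping 3, batch 0. */
        unsigned int family = 6, model = 0xaf, stepping = 3, cur_batch = 0;
        char path[64];
        size_t i;

        for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
                /* Same format string ifs_load_firmware() uses after this patch. */
                snprintf(path, sizeof(path), "intel/ifs_%d/%02x-%02x-%02x-%02x.%s",
                         tests[i].test_num, family, model, stepping, cur_batch,
                         tests[i].suffix);
                printf("%s\n", path);
        }
        return 0;
}

On such a hypothetical part this would request intel/ifs_0/06-af-03-00.scan for the scan device and intel/ifs_2/06-af-03-00.sbft for the new SBAF device.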
@@ -146,6 +153,7 @@ static bool can_restart(union ifs_status status) #define SPINUNIT 100 /* 100 nsec */ static atomic_t array_cpus_in; static atomic_t scan_cpus_in; +static atomic_t sbaf_cpus_in; /* * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus() @@ -387,6 +395,225 @@ static void ifs_array_test_gen1(int cpu, struct device *dev) ifsd->status = SCAN_TEST_PASS; } +#define SBAF_STATUS_PASS 0 +#define SBAF_STATUS_SIGN_FAIL 1 +#define SBAF_STATUS_INTR 2 +#define SBAF_STATUS_TEST_FAIL 3 + +enum sbaf_status_err_code { + IFS_SBAF_NO_ERROR = 0, + IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN = 1, + IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS = 2, + IFS_SBAF_UNASSIGNED_ERROR_CODE3 = 3, + IFS_SBAF_INVALID_BUNDLE_INDEX = 4, + IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS = 5, + IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY = 6, + IFS_SBAF_UNASSIGNED_ERROR_CODE7 = 7, + IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT = 8, + IFS_SBAF_INTERRUPTED_DURING_EXECUTION = 9, + IFS_SBAF_INVALID_PROGRAM_INDEX = 0xA, + IFS_SBAF_CORRUPTED_CHUNK = 0xB, + IFS_SBAF_DID_NOT_START = 0xC, +}; + +static const char * const sbaf_test_status[] = { + [IFS_SBAF_NO_ERROR] = "SBAF no error", + [IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN] = "Other thread could not join.", + [IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS] = "Interrupt occurred prior to SBAF coordination.", + [IFS_SBAF_UNASSIGNED_ERROR_CODE3] = "Unassigned error code 0x3", + [IFS_SBAF_INVALID_BUNDLE_INDEX] = "Non-valid sbaf bundles. Reload test image", + [IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS] = "Mismatch in arguments between threads T0/T1.", + [IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY] = "Core not capable of performing SBAF currently", + [IFS_SBAF_UNASSIGNED_ERROR_CODE7] = "Unassigned error code 0x7", + [IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT] = "Exceeded number of Logical Processors (LP) allowed to run Scan-At-Field concurrently", + [IFS_SBAF_INTERRUPTED_DURING_EXECUTION] = "Interrupt occurred prior to SBAF start", + [IFS_SBAF_INVALID_PROGRAM_INDEX] = "SBAF program index not valid", + [IFS_SBAF_CORRUPTED_CHUNK] = "SBAF operation aborted due to corrupted chunk", + [IFS_SBAF_DID_NOT_START] = "SBAF operation did not start", +}; + +static void sbaf_message_not_tested(struct device *dev, int cpu, u64 status_data) +{ + union ifs_sbaf_status status = (union ifs_sbaf_status)status_data; + + if (status.error_code < ARRAY_SIZE(sbaf_test_status)) { + dev_info(dev, "CPU(s) %*pbl: SBAF operation did not start. %s\n", + cpumask_pr_args(cpu_smt_mask(cpu)), + sbaf_test_status[status.error_code]); + } else if (status.error_code == IFS_SW_TIMEOUT) { + dev_info(dev, "CPU(s) %*pbl: software timeout during scan\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } else if (status.error_code == IFS_SW_PARTIAL_COMPLETION) { + dev_info(dev, "CPU(s) %*pbl: %s\n", + cpumask_pr_args(cpu_smt_mask(cpu)), + "Not all SBAF bundles executed. 
Maximum forward progress retries exceeded"); + } else { + dev_info(dev, "CPU(s) %*pbl: SBAF unknown status %llx\n", + cpumask_pr_args(cpu_smt_mask(cpu)), status.data); + } +} + +static void sbaf_message_fail(struct device *dev, int cpu, union ifs_sbaf_status status) +{ + /* Failed signature check is set when SBAF signature did not match the expected value */ + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL) { + dev_err(dev, "CPU(s) %*pbl: Failed signature check\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } + + /* Failed to reach end of test */ + if (status.sbaf_status == SBAF_STATUS_TEST_FAIL) { + dev_err(dev, "CPU(s) %*pbl: Failed to complete test\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } +} + +static bool sbaf_bundle_completed(union ifs_sbaf_status status) +{ + return !(status.sbaf_status || status.error_code); +} + +static bool sbaf_can_restart(union ifs_sbaf_status status) +{ + enum sbaf_status_err_code err_code = status.error_code; + + /* Signature for chunk is bad, or scan test failed */ + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL || + status.sbaf_status == SBAF_STATUS_TEST_FAIL) + return false; + + switch (err_code) { + case IFS_SBAF_NO_ERROR: + case IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN: + case IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS: + case IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT: + case IFS_SBAF_INTERRUPTED_DURING_EXECUTION: + return true; + case IFS_SBAF_UNASSIGNED_ERROR_CODE3: + case IFS_SBAF_INVALID_BUNDLE_INDEX: + case IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS: + case IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY: + case IFS_SBAF_UNASSIGNED_ERROR_CODE7: + case IFS_SBAF_INVALID_PROGRAM_INDEX: + case IFS_SBAF_CORRUPTED_CHUNK: + case IFS_SBAF_DID_NOT_START: + break; + } + return false; +} + +/* + * Execute the SBAF test. Called "simultaneously" on all threads of a core + * at high priority using the stop_cpus mechanism. + */ +static int dosbaf(void *data) +{ + struct sbaf_run_params *run_params = data; + int cpu = smp_processor_id(); + union ifs_sbaf_status status; + struct ifs_data *ifsd; + int first; + + ifsd = run_params->ifsd; + + /* Only the first logical CPU on a core reports result */ + first = cpumask_first(cpu_smt_mask(cpu)); + wait_for_sibling_cpu(&sbaf_cpus_in, NSEC_PER_SEC); + + /* + * This WRMSR will wait for other HT threads to also write + * to this MSR (at most for activate.delay cycles). Then it + * starts scan of each requested bundle. The core test happens + * during the "execution" of the WRMSR. 
+ */ + wrmsrl(MSR_ACTIVATE_SBAF, run_params->activate->data); + rdmsrl(MSR_SBAF_STATUS, status.data); + trace_ifs_sbaf(ifsd->cur_batch, *run_params->activate, status); + + /* Pass back the result of the test */ + if (cpu == first) + run_params->status = status; + + return 0; +} + +static void ifs_sbaf_test_core(int cpu, struct device *dev) +{ + struct sbaf_run_params run_params; + union ifs_sbaf_status status = {}; + union ifs_sbaf activate; + unsigned long timeout; + struct ifs_data *ifsd; + int stop_bundle; + int retries; + + ifsd = ifs_get_data(dev); + + activate.data = 0; + activate.delay = IFS_THREAD_WAIT; + + timeout = jiffies + 2 * HZ; + retries = MAX_IFS_RETRIES; + activate.bundle_idx = 0; + stop_bundle = ifsd->max_bundle; + + while (activate.bundle_idx <= stop_bundle) { + if (time_after(jiffies, timeout)) { + status.error_code = IFS_SW_TIMEOUT; + break; + } + + atomic_set(&sbaf_cpus_in, 0); + + run_params.ifsd = ifsd; + run_params.activate = &activate; + run_params.retry_cnt = &retries; + stop_core_cpuslocked(cpu, dosbaf, &run_params); + + status = run_params.status; + + if (sbaf_bundle_completed(status)) { + activate.bundle_idx = status.bundle_idx + 1; + activate.pgm_idx = 0; + retries = MAX_IFS_RETRIES; + continue; + } + + /* Some cases can be retried, give up for others */ + if (!sbaf_can_restart(status)) + break; + + if (status.pgm_idx == activate.pgm_idx) { + /* If no progress retry */ + if (--retries == 0) { + if (status.error_code == IFS_NO_ERROR) + status.error_code = IFS_SW_PARTIAL_COMPLETION; + break; + } + } else { + /* if some progress, more pgms remaining in bundle, reset retries */ + retries = MAX_IFS_RETRIES; + activate.bundle_idx = status.bundle_idx; + activate.pgm_idx = status.pgm_idx; + } + } + + /* Update status for this core */ + ifsd->scan_details = status.data; + + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL || + status.sbaf_status == SBAF_STATUS_TEST_FAIL) { + ifsd->status = SCAN_TEST_FAIL; + sbaf_message_fail(dev, cpu, status); + } else if (status.error_code || status.sbaf_status == SBAF_STATUS_INTR || + (activate.bundle_idx < stop_bundle)) { + ifsd->status = SCAN_NOT_TESTED; + sbaf_message_not_tested(dev, cpu, status.data); + } else { + ifsd->status = SCAN_TEST_PASS; + } +} + /* * Initiate per core test. It wakes up work queue threads on the target cpu and * its sibling cpu. 
Once all sibling threads wake up, the scan test gets executed and @@ -420,6 +647,12 @@ int do_core_test(int cpu, struct device *dev) else ifs_array_test_gen1(cpu, dev); break; + case IFS_TYPE_SBAF: + if (!ifsd->loaded) + ret = -EPERM; + else + ifs_sbaf_test_core(cpu, dev); + break; default: ret = -EINVAL; } diff --git a/drivers/platform/x86/intel/int3472/Makefile b/drivers/platform/x86/intel/int3472/Makefile index 9f16cb514397..a8aba07bf1dc 100644 --- a/drivers/platform/x86/intel/int3472/Makefile +++ b/drivers/platform/x86/intel/int3472/Makefile @@ -1,4 +1,7 @@ obj-$(CONFIG_INTEL_SKL_INT3472) += intel_skl_int3472_discrete.o \ - intel_skl_int3472_tps68470.o -intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o led.o common.o -intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o common.o + intel_skl_int3472_tps68470.o \ + intel_skl_int3472_common.o +intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o led.o +intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o + +intel_skl_int3472_common-y += common.o diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c index 9db2bb0bbba4..b3a2578e06c1 100644 --- a/drivers/platform/x86/intel/int3472/common.c +++ b/drivers/platform/x86/intel/int3472/common.c @@ -29,6 +29,7 @@ union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev, char *i return obj; } +EXPORT_SYMBOL_GPL(skl_int3472_get_acpi_buffer); int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb) { @@ -52,6 +53,7 @@ out_free_obj: kfree(obj); return ret; } +EXPORT_SYMBOL_GPL(skl_int3472_fill_cldb); /* sensor_adev_ret may be NULL, name_ret must not be NULL */ int skl_int3472_get_sensor_adev_and_name(struct device *dev, @@ -80,3 +82,8 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev, return ret; } +EXPORT_SYMBOL_GPL(skl_int3472_get_sensor_adev_and_name); + +MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI Device Driver library"); +MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index 07b302e09340..3de463c3d13b 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/overflow.h> #include <linux/platform_device.h> +#include <linux/string_choices.h> #include <linux/uuid.h> #include "common.h" @@ -69,11 +70,7 @@ static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry, if (!adev) return -ENODEV; - table_entry->key = acpi_dev_name(adev); - table_entry->chip_hwnum = agpio->pin_table[0]; - table_entry->con_id = func; - table_entry->idx = 0; - table_entry->flags = polarity; + *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, polarity); return 0; } @@ -234,7 +231,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", func, agpio->resource_source.string_ptr, agpio->pin_table[0], - (polarity == GPIO_ACTIVE_HIGH) ? 
"high" : "low"); + str_high_low(polarity == GPIO_ACTIVE_HIGH)); switch (type) { case INT3472_GPIO_TYPE_RESET: diff --git a/drivers/platform/x86/intel/oaktrail.c b/drivers/platform/x86/intel/oaktrail.c index 217630f40c3f..265cef327b4f 100644 --- a/drivers/platform/x86/intel/oaktrail.c +++ b/drivers/platform/x86/intel/oaktrail.c @@ -28,7 +28,6 @@ #include <linux/backlight.h> #include <linux/dmi.h> #include <linux/err.h> -#include <linux/fb.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/module.h> @@ -250,7 +249,7 @@ static int oaktrail_backlight_init(void) oaktrail_bl_device = bd; bd->props.brightness = get_backlight_brightness(bd); - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; backlight_update_status(bd); return 0; diff --git a/drivers/platform/x86/intel/pmc/adl.c b/drivers/platform/x86/intel/pmc/adl.c index e7878558fd90..9d9c07f44ff6 100644 --- a/drivers/platform/x86/intel/pmc/adl.c +++ b/drivers/platform/x86/intel/pmc/adl.c @@ -295,6 +295,8 @@ const struct pmc_reg_map adl_reg_map = { .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, + .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED, .lpm_num_modes = ADL_LPM_NUM_MODES, .lpm_num_maps = ADL_LPM_NUM_MAPS, diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c index dd72974bf71e..513c02670c5a 100644 --- a/drivers/platform/x86/intel/pmc/cnp.c +++ b/drivers/platform/x86/intel/pmc/cnp.c @@ -200,6 +200,8 @@ const struct pmc_reg_map cnp_reg_map = { .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, + .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED, .etr3_offset = ETR3_OFFSET, }; diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index 01ae71c6df59..ecb47f8b4f83 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -11,6 +11,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/acpi_pmtmr.h> #include <linux/bitfield.h> #include <linux/debugfs.h> #include <linux/delay.h> @@ -714,6 +715,49 @@ static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused) } DEFINE_SHOW_ATTRIBUTE(pmc_core_s0ix_blocker); +static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) { + struct pmc *pmc; + u32 ltr_ign; + + pmc = pmcdev->pmcs[i]; + if (!pmc) + continue; + + guard(mutex)(&pmcdev->lock); + pmc->ltr_ign = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset); + + /* ltr_ignore_max is the max index value for LTR ignore register */ + ltr_ign = pmc->ltr_ign | GENMASK(pmc->map->ltr_ignore_max, 0); + pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, ltr_ign); + } + + /* + * Ignoring ME during suspend is blocking platforms with ADL PCH to get to + * deeper S0ix substate. 
+ */ + pmc_core_send_ltr_ignore(pmcdev, 6, 0); +} + +static void pmc_core_ltr_restore_all(struct pmc_dev *pmcdev) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) { + struct pmc *pmc; + + pmc = pmcdev->pmcs[i]; + if (!pmc) + continue; + + guard(mutex)(&pmcdev->lock); + pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, pmc->ltr_ign); + } +} + static inline u64 adjust_lpm_residency(struct pmc *pmc, u32 offset, const int lpm_adj_x2) { @@ -728,12 +772,11 @@ static int pmc_core_substate_res_show(struct seq_file *s, void *unused) struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2; u32 offset = pmc->map->lpm_residency_offset; - unsigned int i; int mode; seq_printf(s, "%-10s %-15s\n", "Substate", "Residency"); - pmc_for_each_mode(i, mode, pmcdev) { + pmc_for_each_mode(mode, pmcdev) { seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode], adjust_lpm_residency(pmc, offset + (4 * mode), lpm_adj_x2)); } @@ -787,20 +830,21 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs); static void pmc_core_substate_req_header_show(struct seq_file *s, int pmc_index) { struct pmc_dev *pmcdev = s->private; - unsigned int i; int mode; seq_printf(s, "%30s |", "Element"); - pmc_for_each_mode(i, mode, pmcdev) + pmc_for_each_mode(mode, pmcdev) seq_printf(s, " %9s |", pmc_lpm_modes[mode]); - seq_printf(s, " %9s |\n", "Status"); + seq_printf(s, " %9s |", "Status"); + seq_printf(s, " %11s |\n", "Live Status"); } static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; u32 sts_offset; + u32 sts_offset_live; u32 *lpm_req_regs; unsigned int mp, pmc_index; int num_maps; @@ -815,6 +859,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) maps = pmc->map->lpm_sts; num_maps = pmc->map->lpm_num_maps; sts_offset = pmc->map->lpm_status_offset; + sts_offset_live = pmc->map->lpm_live_status_offset; lpm_req_regs = pmc->lpm_req_regs; /* @@ -832,20 +877,24 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) for (mp = 0; mp < num_maps; mp++) { u32 req_mask = 0; u32 lpm_status; + u32 lpm_status_live; const struct pmc_bit_map *map; - int mode, idx, i, len = 32; + int mode, i, len = 32; /* * Capture the requirements and create a mask so that we only * show an element if it's required for at least one of the * enabled low power modes */ - pmc_for_each_mode(idx, mode, pmcdev) + pmc_for_each_mode(mode, pmcdev) req_mask |= lpm_req_regs[mp + (mode * num_maps)]; /* Get the last latched status for this map */ lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4)); + /* Get the runtime status for this map */ + lpm_status_live = pmc_core_reg_read(pmc, sts_offset_live + (mp * 4)); + /* Loop over elements in this map */ map = maps[mp]; for (i = 0; map[i].name && i < len; i++) { @@ -863,7 +912,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) seq_printf(s, "pmc%d: %26s |", pmc_index, map[i].name); /* Loop over the enabled states and display if required */ - pmc_for_each_mode(idx, mode, pmcdev) { + pmc_for_each_mode(mode, pmcdev) { bool required = lpm_req_regs[mp + (mode * num_maps)] & bit_mask; seq_printf(s, " %9s |", required ? "Required" : " "); @@ -872,6 +921,9 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) /* In Status column, show the last captured state of this agent */ seq_printf(s, " %9s |", lpm_status & bit_mask ? 
"Yes" : " "); + /* In Live status column, show the live state of this agent */ + seq_printf(s, " %11s |", lpm_status_live & bit_mask ? "Yes" : " "); + seq_puts(s, "\n"); } } @@ -925,7 +977,6 @@ static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; - unsigned int idx; bool c10; u32 reg; int mode; @@ -939,7 +990,7 @@ static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused) c10 = true; } - pmc_for_each_mode(idx, mode, pmcdev) { + pmc_for_each_mode(mode, pmcdev) { if ((BIT(mode) & reg) && !c10) seq_printf(s, " [%s]", pmc_lpm_modes[mode]); else @@ -960,7 +1011,6 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file, struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; bool clear = false, c10 = false; unsigned char buf[8]; - unsigned int idx; int m, mode; u32 reg; @@ -979,7 +1029,7 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file, mode = sysfs_match_string(pmc_lpm_modes, buf); /* Check string matches enabled mode */ - pmc_for_each_mode(idx, m, pmcdev) + pmc_for_each_mode(m, pmcdev) if (mode == m) break; @@ -1208,6 +1258,38 @@ static bool pmc_core_is_pson_residency_enabled(struct pmc_dev *pmcdev) return val == 1; } +/* + * Enable or disable ACPI PM Timer + * + * This function is intended to be a callback for ACPI PM suspend/resume event. + * The ACPI PM Timer is enabled on resume only if it was enabled during suspend. + */ +static void pmc_core_acpi_pm_timer_suspend_resume(void *data, bool suspend) +{ + struct pmc_dev *pmcdev = data; + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + const struct pmc_reg_map *map = pmc->map; + bool enabled; + u32 reg; + + if (!map->acpi_pm_tmr_ctl_offset) + return; + + guard(mutex)(&pmcdev->lock); + + if (!suspend && !pmcdev->enable_acpi_pm_timer_on_resume) + return; + + reg = pmc_core_reg_read(pmc, map->acpi_pm_tmr_ctl_offset); + enabled = !(reg & map->acpi_pm_tmr_disable_bit); + if (suspend) + reg |= map->acpi_pm_tmr_disable_bit; + else + reg &= ~map->acpi_pm_tmr_disable_bit; + pmc_core_reg_write(pmc, map->acpi_pm_tmr_ctl_offset, reg); + + pmcdev->enable_acpi_pm_timer_on_resume = suspend && enabled; +} static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) { @@ -1404,6 +1486,7 @@ static int pmc_core_probe(struct platform_device *pdev) struct pmc_dev *pmcdev; const struct x86_cpu_id *cpu_id; int (*core_init)(struct pmc_dev *pmcdev); + const struct pmc_reg_map *map; struct pmc *primary_pmc; int ret; @@ -1462,6 +1545,11 @@ static int pmc_core_probe(struct platform_device *pdev) pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) * pmc_core_adjust_slp_s0_step(primary_pmc, 1)); + map = primary_pmc->map; + if (map->acpi_pm_tmr_ctl_offset) + acpi_pmtmr_register_suspend_resume_callback(pmc_core_acpi_pm_timer_suspend_resume, + pmcdev); + device_initialized = true; dev_info(&pdev->dev, " initialized\n"); @@ -1471,6 +1559,12 @@ static int pmc_core_probe(struct platform_device *pdev) static void pmc_core_remove(struct platform_device *pdev) { struct pmc_dev *pmcdev = platform_get_drvdata(pdev); + const struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + const struct pmc_reg_map *map = pmc->map; + + if (map->acpi_pm_tmr_ctl_offset) + acpi_pmtmr_unregister_suspend_resume_callback(); + pmc_core_dbgfs_unregister(pmcdev); pmc_core_clean_structure(pdev); } @@ -1479,6 +1573,10 @@ static bool warn_on_s0ix_failures; module_param(warn_on_s0ix_failures, bool, 0644); MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix 
failures"); +static bool ltr_ignore_all_suspend = true; +module_param(ltr_ignore_all_suspend, bool, 0644); +MODULE_PARM_DESC(ltr_ignore_all_suspend, "Ignore all LTRs during suspend"); + static __maybe_unused int pmc_core_suspend(struct device *dev) { struct pmc_dev *pmcdev = dev_get_drvdata(dev); @@ -1488,6 +1586,9 @@ static __maybe_unused int pmc_core_suspend(struct device *dev) if (pmcdev->suspend) pmcdev->suspend(pmcdev); + if (ltr_ignore_all_suspend) + pmc_core_ltr_ignore_all(pmcdev); + /* Check if the syspend will actually use S0ix */ if (pm_suspend_via_firmware()) return 0; @@ -1594,6 +1695,9 @@ static __maybe_unused int pmc_core_resume(struct device *dev) { struct pmc_dev *pmcdev = dev_get_drvdata(dev); + if (ltr_ignore_all_suspend) + pmc_core_ltr_restore_all(pmcdev); + if (pmcdev->resume) return pmcdev->resume(pmcdev); diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h index ea04de7eb9e8..75fd593a7b0f 100644 --- a/drivers/platform/x86/intel/pmc/core.h +++ b/drivers/platform/x86/intel/pmc/core.h @@ -68,6 +68,8 @@ struct telem_endpoint; #define SPT_PMC_LTR_SCC 0x3A0 #define SPT_PMC_LTR_ISH 0x3A4 +#define SPT_PMC_ACPI_PM_TMR_CTL_OFFSET 0x18FC + /* Sunrise Point: PGD PFET Enable Ack Status Registers */ enum ppfear_regs { SPT_PMC_XRAM_PPFEAR0A = 0x590, @@ -148,6 +150,8 @@ enum ppfear_regs { #define SPT_PMC_VRIC1_SLPS0LVEN BIT(13) #define SPT_PMC_VRIC1_XTALSDQDIS BIT(22) +#define SPT_PMC_BIT_ACPI_PM_TMR_DISABLE BIT(1) + /* Cannonlake Power Management Controller register offsets */ #define CNP_PMC_SLPS0_DBG_OFFSET 0x10B4 #define CNP_PMC_PM_CFG_OFFSET 0x1818 @@ -351,6 +355,8 @@ struct pmc_reg_map { const u8 *lpm_reg_index; const u32 pson_residency_offset; const u32 pson_residency_counter_step; + const u32 acpi_pm_tmr_ctl_offset; + const u32 acpi_pm_tmr_disable_bit; }; /** @@ -372,6 +378,7 @@ struct pmc_info { * @map: pointer to pmc_reg_map struct that contains platform * specific attributes * @lpm_req_regs: List of substate requirements + * @ltr_ign: Holds LTR ignore data while suspended * * pmc contains info about one power management controller device. 
*/ @@ -380,6 +387,7 @@ struct pmc { void __iomem *regbase; const struct pmc_reg_map *map; u32 *lpm_req_regs; + u32 ltr_ign; }; /** @@ -424,6 +432,8 @@ struct pmc_dev { u32 die_c6_offset; struct telem_endpoint *punit_ep; struct pmc_info *regmap_list; + + bool enable_acpi_pm_timer_on_resume; }; enum pmc_index { @@ -604,10 +614,12 @@ int lnl_core_init(struct pmc_dev *pmcdev); void cnl_suspend(struct pmc_dev *pmcdev); int cnl_resume(struct pmc_dev *pmcdev); -#define pmc_for_each_mode(i, mode, pmcdev) \ - for (i = 0, mode = pmcdev->lpm_en_modes[i]; \ - i < pmcdev->num_lpm_modes; \ - i++, mode = pmcdev->lpm_en_modes[i]) +#define pmc_for_each_mode(mode, pmcdev) \ + for (unsigned int __i = 0, __cond; \ + __cond = __i < (pmcdev)->num_lpm_modes, \ + __cond && ((mode) = (pmcdev)->lpm_en_modes[__i]), \ + __cond; \ + __i++) #define DEFINE_PMC_CORE_ATTR_WRITE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ diff --git a/drivers/platform/x86/intel/pmc/core_ssram.c b/drivers/platform/x86/intel/pmc/core_ssram.c index 1bde86c54eb9..c259c96b7dfd 100644 --- a/drivers/platform/x86/intel/pmc/core_ssram.c +++ b/drivers/platform/x86/intel/pmc/core_ssram.c @@ -9,11 +9,11 @@ */ #include <linux/cleanup.h> +#include <linux/intel_vsec.h> #include <linux/pci.h> #include <linux/io-64-nonatomic-lo-hi.h> #include "core.h" -#include "../vsec.h" #include "../pmt/telemetry.h" #define SSRAM_HDR_SIZE 0x100 @@ -45,7 +45,7 @@ static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc) struct telem_endpoint *ep; const u8 *lpm_indices; int num_maps, mode_offset = 0; - int ret, mode, i; + int ret, mode; int lpm_size; u32 guid; @@ -116,7 +116,7 @@ static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc) * */ mode_offset = LPM_HEADER_OFFSET + LPM_MODE_OFFSET; - pmc_for_each_mode(i, mode, pmcdev) { + pmc_for_each_mode(mode, pmcdev) { u32 *req_offset = pmc->lpm_req_regs + (mode * num_maps); int m; diff --git a/drivers/platform/x86/intel/pmc/icl.c b/drivers/platform/x86/intel/pmc/icl.c index 71b0fd6cb7d8..cbbd44054468 100644 --- a/drivers/platform/x86/intel/pmc/icl.c +++ b/drivers/platform/x86/intel/pmc/icl.c @@ -46,6 +46,8 @@ const struct pmc_reg_map icl_reg_map = { .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, + .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED, .etr3_offset = ETR3_OFFSET, }; diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c index c7d15d864039..91f2fa728f5c 100644 --- a/drivers/platform/x86/intel/pmc/mtl.c +++ b/drivers/platform/x86/intel/pmc/mtl.c @@ -462,6 +462,8 @@ const struct pmc_reg_map mtl_socm_reg_map = { .ppfear_buckets = MTL_SOCM_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, + .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .lpm_num_maps = ADL_LPM_NUM_MAPS, .ltr_ignore_max = MTL_SOCM_NUM_IP_IGN_ALLOWED, .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, diff --git a/drivers/platform/x86/intel/pmc/spt.c b/drivers/platform/x86/intel/pmc/spt.c index ab993a69e33e..2cd2b3c68e46 100644 --- a/drivers/platform/x86/intel/pmc/spt.c +++ b/drivers/platform/x86/intel/pmc/spt.c @@ -130,6 +130,8 @@ const struct pmc_reg_map spt_reg_map = { .ppfear_buckets = SPT_PPFEAR_NUM_ENTRIES, 
.pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT, + .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, + .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED, .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET, }; diff --git a/drivers/platform/x86/intel/pmc/tgl.c b/drivers/platform/x86/intel/pmc/tgl.c index e0580de18077..371b4e30f142 100644 --- a/drivers/platform/x86/intel/pmc/tgl.c +++ b/drivers/platform/x86/intel/pmc/tgl.c @@ -197,6 +197,8 @@ const struct pmc_reg_map tgl_reg_map = { .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, + .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED, .lpm_num_maps = TGL_LPM_NUM_MAPS, .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c index 4b53940a64e2..c04bb7f97a4d 100644 --- a/drivers/platform/x86/intel/pmt/class.c +++ b/drivers/platform/x86/intel/pmt/class.c @@ -9,12 +9,12 @@ */ #include <linux/kernel.h> +#include <linux/intel_vsec.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/pci.h> -#include "../vsec.h" #include "class.h" #define PMT_XA_START 1 @@ -58,6 +58,22 @@ pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count) return count; } +int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf, + void __iomem *addr, u32 count) +{ + if (cb && cb->read_telem) + return cb->read_telem(pdev, guid, buf, count); + + if (guid == GUID_SPR_PUNIT) + /* PUNIT on SPR only supports aligned 64-bit read */ + return pmt_memcpy64_fromio(buf, addr, count); + + memcpy_fromio(buf, addr, count); + + return count; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_read_mmio, INTEL_PMT); + /* * sysfs */ @@ -79,11 +95,8 @@ intel_pmt_read(struct file *filp, struct kobject *kobj, if (count > entry->size - off) count = entry->size - off; - if (entry->guid == GUID_SPR_PUNIT) - /* PUNIT on SPR only supports aligned 64-bit read */ - count = pmt_memcpy64_fromio(buf, entry->base + off, count); - else - memcpy_fromio(buf, entry->base + off, count); + count = pmt_telem_read_mmio(entry->ep->pcidev, entry->cb, entry->header.guid, buf, + entry->base + off, count); return count; } @@ -239,6 +252,7 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, entry->guid = header->guid; entry->size = header->size; + entry->cb = ivdev->priv_data; return 0; } @@ -300,7 +314,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, goto fail_ioremap; if (ns->pmt_add_endpoint) { - ret = ns->pmt_add_endpoint(entry, ivdev->pcidev); + ret = ns->pmt_add_endpoint(ivdev, entry); if (ret) goto fail_add_endpoint; } diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h index d23c63b73ab7..a267ac964423 100644 --- a/drivers/platform/x86/intel/pmt/class.h +++ b/drivers/platform/x86/intel/pmt/class.h @@ -2,13 +2,13 @@ #ifndef _INTEL_PMT_CLASS_H #define _INTEL_PMT_CLASS_H +#include <linux/intel_vsec.h> #include <linux/xarray.h> #include <linux/types.h> #include <linux/bits.h> #include <linux/err.h> #include <linux/io.h> -#include "../vsec.h" #include "telemetry.h" /* PMT access types */ @@ -24,6 +24,7 @@ struct pci_dev; struct telem_endpoint { struct pci_dev *pcidev; struct telem_header 
header; + struct pmt_callbacks *cb; void __iomem *base; bool present; struct kref kref; @@ -43,6 +44,7 @@ struct intel_pmt_entry { struct kobject *kobj; void __iomem *disc_table; void __iomem *base; + struct pmt_callbacks *cb; unsigned long base_addr; size_t size; u32 guid; @@ -55,10 +57,12 @@ struct intel_pmt_namespace { const struct attribute_group *attr_grp; int (*pmt_header_decode)(struct intel_pmt_entry *entry, struct device *dev); - int (*pmt_add_endpoint)(struct intel_pmt_entry *entry, - struct pci_dev *pdev); + int (*pmt_add_endpoint)(struct intel_vsec_device *ivdev, + struct intel_pmt_entry *entry); }; +int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf, + void __iomem *addr, u32 count); bool intel_pmt_is_early_client_hw(struct device *dev); int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns, diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c index 4014c02cafdb..9079d5dffc03 100644 --- a/drivers/platform/x86/intel/pmt/crashlog.c +++ b/drivers/platform/x86/intel/pmt/crashlog.c @@ -9,6 +9,7 @@ */ #include <linux/auxiliary_bus.h> +#include <linux/intel_vsec.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> @@ -16,7 +17,6 @@ #include <linux/uaccess.h> #include <linux/overflow.h> -#include "../vsec.h" #include "class.h" /* Crashlog discovery header types */ diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c index 09258564dfc4..c9feac859e57 100644 --- a/drivers/platform/x86/intel/pmt/telemetry.c +++ b/drivers/platform/x86/intel/pmt/telemetry.c @@ -9,6 +9,7 @@ */ #include <linux/auxiliary_bus.h> +#include <linux/intel_vsec.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> @@ -16,7 +17,6 @@ #include <linux/uaccess.h> #include <linux/overflow.h> -#include "../vsec.h" #include "class.h" #define TELEM_SIZE_OFFSET 0x0 @@ -93,8 +93,8 @@ static int pmt_telem_header_decode(struct intel_pmt_entry *entry, return 0; } -static int pmt_telem_add_endpoint(struct intel_pmt_entry *entry, - struct pci_dev *pdev) +static int pmt_telem_add_endpoint(struct intel_vsec_device *ivdev, + struct intel_pmt_entry *entry) { struct telem_endpoint *ep; @@ -104,13 +104,14 @@ static int pmt_telem_add_endpoint(struct intel_pmt_entry *entry, return -ENOMEM; ep = entry->ep; - ep->pcidev = pdev; + ep->pcidev = ivdev->pcidev; ep->header.access_type = entry->header.access_type; ep->header.guid = entry->header.guid; ep->header.base_offset = entry->header.base_offset; ep->header.size = entry->header.size; ep->base = entry->base; ep->present = true; + ep->cb = ivdev->priv_data; kref_init(&ep->kref); @@ -218,7 +219,8 @@ int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count) if (offset + NUM_BYTES_QWORD(count) > size) return -EINVAL; - memcpy_fromio(data, ep->base + offset, NUM_BYTES_QWORD(count)); + pmt_telem_read_mmio(ep->pcidev, ep->cb, ep->header.guid, data, ep->base + offset, + NUM_BYTES_QWORD(count)); return ep->present ? 
0 : -EPIPE; } diff --git a/drivers/platform/x86/intel/sdsi.c b/drivers/platform/x86/intel/sdsi.c index 277e4f4b20ac..9d137621f0e6 100644 --- a/drivers/platform/x86/intel/sdsi.c +++ b/drivers/platform/x86/intel/sdsi.c @@ -12,6 +12,7 @@ #include <linux/bits.h> #include <linux/bitfield.h> #include <linux/device.h> +#include <linux/intel_vsec.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> @@ -22,8 +23,6 @@ #include <linux/types.h> #include <linux/uaccess.h> -#include "vsec.h" - #define ACCESS_TYPE_BARID 2 #define ACCESS_TYPE_LOCAL 3 diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 10e21563fa46..9ad35fefea47 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -651,10 +651,6 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, /* Lock to prevent module registration when already opened by user space */ static DEFINE_MUTEX(punit_misc_dev_open_lock); -/* Lock to allow one shared misc device for all ISST interfaces */ -static DEFINE_MUTEX(punit_misc_dev_reg_lock); -static int misc_usage_count; -static int misc_device_ret; static int misc_device_open; static int isst_if_open(struct inode *inode, struct file *file) @@ -720,39 +716,23 @@ static struct miscdevice isst_if_char_driver = { static int isst_misc_reg(void) { - mutex_lock(&punit_misc_dev_reg_lock); - if (misc_device_ret) - goto unlock_exit; - - if (!misc_usage_count) { - misc_device_ret = isst_if_cpu_info_init(); - if (misc_device_ret) - goto unlock_exit; - - misc_device_ret = misc_register(&isst_if_char_driver); - if (misc_device_ret) { - isst_if_cpu_info_exit(); - goto unlock_exit; - } - } - misc_usage_count++; + int ret; -unlock_exit: - mutex_unlock(&punit_misc_dev_reg_lock); + ret = isst_if_cpu_info_init(); + if (ret) + return ret; - return misc_device_ret; + ret = misc_register(&isst_if_char_driver); + if (ret) + isst_if_cpu_info_exit(); + + return ret; } static void isst_misc_unreg(void) { - mutex_lock(&punit_misc_dev_reg_lock); - if (misc_usage_count) - misc_usage_count--; - if (!misc_usage_count && !misc_device_ret) { - misc_deregister(&isst_if_char_driver); - isst_if_cpu_info_exit(); - } - mutex_unlock(&punit_misc_dev_reg_lock); + misc_deregister(&isst_if_char_driver); + isst_if_cpu_info_exit(); } /** diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c index 83e8b1fe53b3..486ddc9b3592 100644 --- a/drivers/platform/x86/intel/tpmi.c +++ b/drivers/platform/x86/intel/tpmi.c @@ -51,6 +51,7 @@ #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/intel_tpmi.h> +#include <linux/intel_vsec.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/module.h> @@ -59,8 +60,6 @@ #include <linux/sizes.h> #include <linux/string_helpers.h> -#include "vsec.h" - /** * struct intel_tpmi_pfs_entry - TPMI PM Feature Structure (PFS) entry * @tpmi_id: TPMI feature identifier (what the feature is and its data format). 
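pmt_telem_read_mmio() above centralizes the telemetry read path: a per-device callback supplied through priv_data wins if present, the SPR PUNIT GUID otherwise forces aligned 64-bit copies, and everything else falls back to a plain memcpy_fromio(). The user-space model below mirrors only that decision order; the pci_dev and __iomem types are stubbed out, the GUID constant is a placeholder, and the struct pmt_callbacks layout is assumed from the cb->read_telem() call in the hunk rather than taken from linux/intel_vsec.h.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GUID_SPR_PUNIT 0x12345678u      /* placeholder GUID, not the real value */

/* Assumed callback shape, inferred from cb->read_telem(pdev, guid, buf, count). */
struct pmt_callbacks {
        int (*read_telem)(void *pdev, uint32_t guid, void *buf, uint32_t count);
};

static int read_mmio_model(void *pdev, struct pmt_callbacks *cb, uint32_t guid,
                           void *buf, const void *addr, uint32_t count)
{
        /* 1. A parent driver's callback, when installed, replaces the MMIO access. */
        if (cb && cb->read_telem)
                return cb->read_telem(pdev, guid, buf, count);

        /* 2. Stand-in for pmt_memcpy64_fromio(): SPR PUNIT only allows aligned 64-bit reads. */
        if (guid == GUID_SPR_PUNIT) {
                if (((uintptr_t)buf | (uintptr_t)addr | count) & 0x7)
                        return -22;     /* -EINVAL */
                memcpy(buf, addr, count);
                return count;
        }

        /* 3. Default path, stand-in for memcpy_fromio(). */
        memcpy(buf, addr, count);
        return count;
}

static int vendor_read_telem(void *pdev, uint32_t guid, void *buf, uint32_t count)
{
        memset(buf, 0, count);          /* a parent PCI driver could filter or fix up data here */
        return count;
}

int main(void)
{
        struct pmt_callbacks cb = { .read_telem = vendor_read_telem };
        uint64_t src = 0x1122334455667788ULL, dst = 0;

        printf("via callback: %d bytes\n",
               read_mmio_model(NULL, &cb, 0, &dst, &src, sizeof(dst)));
        printf("direct copy:  %d bytes, data %#llx\n",
               read_mmio_model(NULL, NULL, 0, &dst, &src, sizeof(dst)),
               (unsigned long long)dst);
        return 0;
}

Both intel_pmt_read() (sysfs) and pmt_telem_read() (the in-kernel API) now go through this one helper, which is why the entry and endpoint structures both gained a cb pointer.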
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c index 4e880585cbe4..e22b683a7a43 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c @@ -60,11 +60,16 @@ static ssize_t show_attr(struct uncore_data *data, char *buf, enum uncore_index static ssize_t store_attr(struct uncore_data *data, const char *buf, ssize_t count, enum uncore_index index) { - unsigned int input; + unsigned int input = 0; int ret; - if (kstrtouint(buf, 10, &input)) - return -EINVAL; + if (index == UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE) { + if (kstrtobool(buf, (bool *)&input)) + return -EINVAL; + } else { + if (kstrtouint(buf, 10, &input)) + return -EINVAL; + } mutex_lock(&uncore_lock); ret = uncore_write(data, input, index); @@ -103,6 +108,18 @@ show_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ); show_uncore_attr(current_freq_khz, UNCORE_INDEX_CURRENT_FREQ); +store_uncore_attr(elc_low_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD); +store_uncore_attr(elc_high_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD); +store_uncore_attr(elc_high_threshold_enable, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE); +store_uncore_attr(elc_floor_freq_khz, UNCORE_INDEX_EFF_LAT_CTRL_FREQ); + +show_uncore_attr(elc_low_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD); +show_uncore_attr(elc_high_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD); +show_uncore_attr(elc_high_threshold_enable, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE); +show_uncore_attr(elc_floor_freq_khz, UNCORE_INDEX_EFF_LAT_CTRL_FREQ); + #define show_uncore_data(member_name) \ static ssize_t show_##member_name(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf)\ @@ -146,7 +163,8 @@ show_uncore_data(initial_max_freq_khz); static int create_attr_group(struct uncore_data *data, char *name) { - int ret, freq, index = 0; + int ret, index = 0; + unsigned int val; init_attribute_rw(max_freq_khz); init_attribute_rw(min_freq_khz); @@ -168,10 +186,24 @@ static int create_attr_group(struct uncore_data *data, char *name) data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr; data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr; - ret = uncore_read(data, &freq, UNCORE_INDEX_CURRENT_FREQ); + ret = uncore_read(data, &val, UNCORE_INDEX_CURRENT_FREQ); if (!ret) data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr; + ret = uncore_read(data, &val, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD); + if (!ret) { + init_attribute_rw(elc_low_threshold_percent); + init_attribute_rw(elc_high_threshold_percent); + init_attribute_rw(elc_high_threshold_enable); + init_attribute_rw(elc_floor_freq_khz); + + data->uncore_attrs[index++] = &data->elc_low_threshold_percent_kobj_attr.attr; + data->uncore_attrs[index++] = &data->elc_high_threshold_percent_kobj_attr.attr; + data->uncore_attrs[index++] = + &data->elc_high_threshold_enable_kobj_attr.attr; + data->uncore_attrs[index++] = &data->elc_floor_freq_khz_kobj_attr.attr; + } + data->uncore_attrs[index] = NULL; data->uncore_attr_group.name = name; diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h index 4c245b945e4e..26c854cd5d97 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h +++ 
b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h @@ -34,6 +34,13 @@ * @domain_id_kobj_attr: Storage for kobject attribute domain_id * @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id * @package_id_kobj_attr: Storage for kobject attribute package_id + * @elc_low_threshold_percent_kobj_attr: + Storage for kobject attribute elc_low_threshold_percent + * @elc_high_threshold_percent_kobj_attr: + Storage for kobject attribute elc_high_threshold_percent + * @elc_high_threshold_enable_kobj_attr: + Storage for kobject attribute elc_high_threshold_enable + * @elc_floor_freq_khz_kobj_attr: Storage for kobject attribute elc_floor_freq_khz * @uncore_attrs: Attribute storage for group creation * * This structure is used to encapsulate all data related to uncore sysfs @@ -61,7 +68,11 @@ struct uncore_data { struct kobj_attribute domain_id_kobj_attr; struct kobj_attribute fabric_cluster_id_kobj_attr; struct kobj_attribute package_id_kobj_attr; - struct attribute *uncore_attrs[9]; + struct kobj_attribute elc_low_threshold_percent_kobj_attr; + struct kobj_attribute elc_high_threshold_percent_kobj_attr; + struct kobj_attribute elc_high_threshold_enable_kobj_attr; + struct kobj_attribute elc_floor_freq_khz_kobj_attr; + struct attribute *uncore_attrs[13]; }; #define UNCORE_DOMAIN_ID_INVALID -1 @@ -70,6 +81,10 @@ enum uncore_index { UNCORE_INDEX_MIN_FREQ, UNCORE_INDEX_MAX_FREQ, UNCORE_INDEX_CURRENT_FREQ, + UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, + UNCORE_INDEX_EFF_LAT_CTRL_FREQ, }; int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value, diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index 9fa3037c03d1..0591053813a2 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -30,6 +30,7 @@ #define UNCORE_MAJOR_VERSION 0 #define UNCORE_MINOR_VERSION 2 +#define UNCORE_ELC_SUPPORTED_VERSION 2 #define UNCORE_HEADER_INDEX 0 #define UNCORE_FABRIC_CLUSTER_OFFSET 8 @@ -46,6 +47,7 @@ struct tpmi_uncore_struct; /* Information for each cluster */ struct tpmi_uncore_cluster_info { bool root_domain; + bool elc_supported; u8 __iomem *cluster_base; struct uncore_data uncore_data; struct tpmi_uncore_struct *uncore_root; @@ -75,6 +77,10 @@ struct tpmi_uncore_struct { /* Bit definitions for CONTROL register */ #define UNCORE_MAX_RATIO_MASK GENMASK_ULL(14, 8) #define UNCORE_MIN_RATIO_MASK GENMASK_ULL(21, 15) +#define UNCORE_EFF_LAT_CTRL_RATIO_MASK GENMASK_ULL(28, 22) +#define UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK GENMASK_ULL(38, 32) +#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE BIT(39) +#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK GENMASK_ULL(46, 40) /* Helper function to read MMIO offset for max/min control frequency */ static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info, @@ -89,6 +95,48 @@ static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info, *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER; } +/* Helper function to read efficiency latency control values over MMIO */ +static int read_eff_lat_ctrl(struct uncore_data *data, unsigned int *val, enum uncore_index index) +{ + struct tpmi_uncore_cluster_info *cluster_info; + u64 ctrl; + + cluster_info = container_of(data, 
struct tpmi_uncore_cluster_info, uncore_data); + if (cluster_info->root_domain) + return -ENODATA; + + if (!cluster_info->elc_supported) + return -EOPNOTSUPP; + + ctrl = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); + + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, ctrl); + *val *= 100; + *val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK)); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, ctrl); + *val *= 100; + *val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK)); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, ctrl); + break; + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_RATIO_MASK, ctrl) * UNCORE_FREQ_KHZ_MULTIPLIER; + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + #define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_MAX_RATIO_MASK) /* Helper for sysfs read for max/min frequencies. Called under mutex locks */ @@ -137,6 +185,82 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *valu return 0; } +/* Helper function for writing efficiency latency control values over MMIO */ +static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index) +{ + struct tpmi_uncore_cluster_info *cluster_info; + u64 control; + + cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); + + if (cluster_info->root_domain) + return -ENODATA; + + if (!cluster_info->elc_supported) + return -EOPNOTSUPP; + + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + if (val > 100) + return -EINVAL; + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + if (val > 100) + return -EINVAL; + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + if (val > 1) + return -EINVAL; + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + val /= UNCORE_FREQ_KHZ_MULTIPLIER; + if (val > FIELD_MAX(UNCORE_EFF_LAT_CTRL_RATIO_MASK)) + return -EINVAL; + break; + + default: + return -EOPNOTSUPP; + } + + control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); + + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK); + val /= 100; + control &= ~UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, val); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK); + val /= 100; + control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, val); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, val); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + control &= ~UNCORE_EFF_LAT_CTRL_RATIO_MASK; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_RATIO_MASK, val); + break; + + default: + break; + } + + writeq(control, cluster_info->cluster_base + UNCORE_CONTROL_INDEX); + + return 0; +} + /* Helper function to write MMIO offset for max/min control frequency */ static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input, unsigned int index) @@ -156,7 +280,7 @@ static void write_control_freq(struct 
@@ -156,7 +280,7 @@ static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, un
 	writeq(control, (cluster_info->cluster_base + UNCORE_CONTROL_INDEX));
 }
 
-/* Callback for sysfs write for max/min frequencies. Called under mutex locks */
+/* Helper for sysfs write for max/min frequencies. Called under mutex locks */
 static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
 				     enum uncore_index index)
 {
@@ -234,6 +358,33 @@ static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncor
 	case UNCORE_INDEX_CURRENT_FREQ:
 		return uncore_read_freq(data, value);
 
+	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
+	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
+	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
+	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
+		return read_eff_lat_ctrl(data, value, index);
+
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+/* Callback for sysfs write for TPMI uncore data. Called under mutex locks. */
+static int uncore_write(struct uncore_data *data, unsigned int value, enum uncore_index index)
+{
+	switch (index) {
+	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
+	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
+	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
+	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
+		return write_eff_lat_ctrl(data, value, index);
+
+	case UNCORE_INDEX_MIN_FREQ:
+	case UNCORE_INDEX_MAX_FREQ:
+		return uncore_write_control_freq(data, value, index);
+
 	default:
 		break;
 	}
@@ -291,7 +442,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
 		return -EINVAL;
 
 	/* Register callbacks to uncore core */
-	ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq);
+	ret = uncore_freq_common_init(uncore_read, uncore_write);
 	if (ret)
 		return ret;
 
@@ -409,6 +560,9 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
 
 		cluster_info->uncore_root = tpmi_uncore;
 
+		if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) >= UNCORE_ELC_SUPPORTED_VERSION)
+			cluster_info->elc_supported = true;
+
 		ret = uncore_freq_add_entry(&cluster_info->uncore_data, 0);
 		if (ret) {
 			cluster_info->cluster_base = NULL;
@@ -427,6 +581,9 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
 
 	auxiliary_set_drvdata(auxdev, tpmi_uncore);
 
+	if (topology_max_dies_per_package() > 1)
+		return 0;
+
 	tpmi_uncore->root_cluster.root_domain = true;
 	tpmi_uncore->root_cluster.uncore_root = tpmi_uncore;
 
@@ -450,7 +607,9 @@ static void uncore_remove(struct auxiliary_device *auxdev)
 {
 	struct tpmi_uncore_struct *tpmi_uncore = auxiliary_get_drvdata(auxdev);
 
-	uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data);
+	if (tpmi_uncore->root_cluster.root_domain)
+		uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data);
+
 	remove_cluster_entries(tpmi_uncore);
 
 	uncore_freq_common_exit();
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index 0fdfaf3a4f5c..7b5cc9993974 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -17,14 +17,13 @@
 #include <linux/bits.h>
 #include <linux/cleanup.h>
 #include <linux/delay.h>
-#include <linux/kernel.h>
 #include <linux/idr.h>
+#include <linux/intel_vsec.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/types.h>
 
-#include "vsec.h"
-
 #define PMT_XA_START			0
 #define PMT_XA_MAX			INT_MAX
 #define PMT_XA_LIMIT			XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
@@ -213,6 +212,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he
 	intel_vsec_dev->num_resources = header->num_entries;
 	intel_vsec_dev->quirks = info->quirks;
 	intel_vsec_dev->base_addr = info->base_addr;
+	intel_vsec_dev->priv_data = info->priv_data;
 
 	if (header->id == VSEC_ID_SDSI)
 		intel_vsec_dev->ida = &intel_vsec_sdsi_ida;
@@ -341,7 +341,7 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev,
 void intel_vsec_register(struct pci_dev *pdev,
 			 struct intel_vsec_platform_info *info)
 {
-	if (!pdev || !info)
+	if (!pdev || !info || !info->headers)
 		return;
 
 	intel_vsec_walk_header(pdev, info);
diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h
deleted file mode 100644
index e23e76129691..000000000000
--- a/drivers/platform/x86/intel/vsec.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _VSEC_H
-#define _VSEC_H
-
-#include <linux/auxiliary_bus.h>
-#include <linux/bits.h>
-
-#define VSEC_CAP_TELEMETRY	BIT(0)
-#define VSEC_CAP_WATCHER	BIT(1)
-#define VSEC_CAP_CRASHLOG	BIT(2)
-#define VSEC_CAP_SDSI		BIT(3)
-#define VSEC_CAP_TPMI		BIT(4)
-
-/* Intel DVSEC offsets */
-#define INTEL_DVSEC_ENTRIES		0xA
-#define INTEL_DVSEC_SIZE		0xB
-#define INTEL_DVSEC_TABLE		0xC
-#define INTEL_DVSEC_TABLE_BAR(x)	((x) & GENMASK(2, 0))
-#define INTEL_DVSEC_TABLE_OFFSET(x)	((x) & GENMASK(31, 3))
-#define TABLE_OFFSET_SHIFT		3
-
-struct pci_dev;
-struct resource;
-
-enum intel_vsec_id {
-	VSEC_ID_TELEMETRY	= 2,
-	VSEC_ID_WATCHER		= 3,
-	VSEC_ID_CRASHLOG	= 4,
-	VSEC_ID_SDSI		= 65,
-	VSEC_ID_TPMI		= 66,
-};
-
-/**
- * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
- * @rev:         Revision ID of the VSEC/DVSEC register space
- * @length:      Length of the VSEC/DVSEC register space
- * @id:          ID of the feature
- * @num_entries: Number of instances of the feature
- * @entry_size:  Size of the discovery table for each feature
- * @tbir:        BAR containing the discovery tables
- * @offset:      BAR offset of start of the first discovery table
- */
-struct intel_vsec_header {
-	u8	rev;
-	u16	length;
-	u16	id;
-	u8	num_entries;
-	u8	entry_size;
-	u8	tbir;
-	u32	offset;
-};
-
-enum intel_vsec_quirks {
-	/* Watcher feature not supported */
-	VSEC_QUIRK_NO_WATCHER	= BIT(0),
-
-	/* Crashlog feature not supported */
-	VSEC_QUIRK_NO_CRASHLOG	= BIT(1),
-
-	/* Use shift instead of mask to read discovery table offset */
-	VSEC_QUIRK_TABLE_SHIFT	= BIT(2),
-
-	/* DVSEC not present (provided in driver data) */
-	VSEC_QUIRK_NO_DVSEC	= BIT(3),
-
-	/* Platforms requiring quirk in the auxiliary driver */
-	VSEC_QUIRK_EARLY_HW	= BIT(4),
-};
-
-/* Platform specific data */
-struct intel_vsec_platform_info {
-	struct device *parent;
-	struct intel_vsec_header **headers;
-	unsigned long caps;
-	unsigned long quirks;
-	u64 base_addr;
-};
-
-struct intel_vsec_device {
-	struct auxiliary_device auxdev;
-	struct pci_dev *pcidev;
-	struct resource *resource;
-	struct ida *ida;
-	int num_resources;
-	int id; /* xa */
-	void *priv_data;
-	size_t priv_data_size;
-	unsigned long quirks;
-	u64 base_addr;
-};
-
-int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
-		       struct intel_vsec_device *intel_vsec_dev,
-		       const char *name);
-
-static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev)
-{
-	return container_of(dev, struct intel_vsec_device, auxdev.dev);
-}
-
-static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device *auxdev)
-{
-	return container_of(auxdev, struct intel_vsec_device, auxdev);
-}
-
-void intel_vsec_register(struct pci_dev *pdev,
-			 struct intel_vsec_platform_info *info);
-#endif
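With the private vsec.h removed, the declarations above are picked up through the shared <linux/intel_vsec.h> header that vsec.c now includes, and the added !info->headers check makes intel_vsec_register() a no-op when the platform info carries no discovery headers. A rough, hypothetical sketch (not part of this patch) of how a leaf auxiliary driver might resolve its parent intel_vsec_device after the header move, using the auxdev_to_ivdev() helper and the priv_data pointer now copied in intel_vsec_add_dev():

/* Hypothetical consumer sketch: resolve the parent intel_vsec_device. */
#include <linux/auxiliary_bus.h>
#include <linux/intel_vsec.h>
#include <linux/module.h>

static int example_leaf_probe(struct auxiliary_device *auxdev,
			      const struct auxiliary_device_id *id)
{
	struct intel_vsec_device *ivdev = auxdev_to_ivdev(auxdev);

	/* Discovery table resources and provider private data are reachable
	 * directly from the intel_vsec_device created by the parent driver. */
	if (!ivdev->num_resources)
		return -ENODEV;

	dev_info(&auxdev->dev, "%d resource(s), priv_data %s\n",
		 ivdev->num_resources, ivdev->priv_data ? "present" : "absent");
	return 0;
}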