-rw-r--r--  .mailmap | 2
-rw-r--r--  CREDITS | 12
-rw-r--r--  Documentation/admin-guide/pm/cpuidle.rst | 72
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,dp.yaml | 19
-rw-r--r--  Documentation/devicetree/bindings/iio/st,st-sensors.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/net/pse-pd/pse-controller.yaml | 2
-rw-r--r--  Documentation/sound/codecs/cs35l56.rst | 292
-rw-r--r--  Documentation/sound/codecs/index.rst | 9
-rw-r--r--  Documentation/sound/index.rst | 1
-rw-r--r--  Documentation/trace/ftrace.rst | 6
-rw-r--r--  Documentation/virt/kvm/api.rst | 3
-rw-r--r--  Documentation/virt/kvm/devices/s390_flic.rst | 4
-rw-r--r--  MAINTAINERS | 23
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imxrt1050.dtsi | 2
-rw-r--r--  arch/arm/configs/imx_v6_v7_defconfig | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8qm-ss-audio.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx95.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sa8775p.dtsi | 5
-rw-r--r--  arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts | 8
-rw-r--r--  arch/arm64/boot/dts/qcom/x1e80100-crd.dts | 12
-rw-r--r--  arch/arm64/boot/dts/qcom/x1e80100.dtsi | 8
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3328.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3568.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk356x-base.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi | 1
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/mem_protect.c | 3
-rw-r--r--  arch/arm64/kvm/pmu-emul.c | 89
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 32
-rw-r--r--  arch/powerpc/kvm/e500.h | 2
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c | 199
-rw-r--r--  arch/riscv/include/asm/page.h | 1
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 2
-rw-r--r--  arch/riscv/include/asm/sbi.h | 1
-rw-r--r--  arch/riscv/include/asm/spinlock.h | 5
-rw-r--r--  arch/riscv/kernel/entry.S | 21
-rw-r--r--  arch/riscv/kernel/module.c | 18
-rw-r--r--  arch/riscv/kernel/probes/kprobes.c | 2
-rw-r--r--  arch/riscv/kernel/stacktrace.c | 4
-rw-r--r--  arch/riscv/kernel/traps.c | 6
-rw-r--r--  arch/riscv/mm/init.c | 17
-rw-r--r--  arch/s390/kvm/interrupt.c | 6
-rw-r--r--  arch/s390/kvm/vsie.c | 2
-rw-r--r--  arch/x86/kernel/fpu/regset.c | 3
-rw-r--r--  arch/x86/kernel/static_call.c | 1
-rw-r--r--  block/bfq-iosched.c | 12
-rw-r--r--  drivers/acpi/acpi_video.c | 49
-rw-r--r--  drivers/acpi/resource.c | 24
-rw-r--r--  drivers/ata/libata-core.c | 4
-rw-r--r--  drivers/base/topology.c | 24
-rw-r--r--  drivers/bluetooth/btmtk.c | 7
-rw-r--r--  drivers/bluetooth/btnxpuart.c | 1
-rw-r--r--  drivers/bus/mhi/host/pci_generic.c | 2
-rw-r--r--  drivers/cpufreq/Kconfig | 4
-rw-r--r--  drivers/cpuidle/cpuidle-riscv-sbi.c | 4
-rw-r--r--  drivers/cpuidle/governors/teo.c | 91
-rw-r--r--  drivers/gpio/gpio-loongson-64bit.c | 6
-rw-r--r--  drivers/gpio/gpio-sim.c | 48
-rw-r--r--  drivers/gpio/gpio-virtuser.c | 91
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 76
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 48
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 35
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_state.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 12
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 12
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/ite-it6263.c | 4
-rw-r--r--  drivers/gpu/drm/display/drm_bridge_connector.c | 8
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 4
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 12
-rw-r--r--  drivers/gpu/drm/mediatek/Kconfig | 5
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_crtc.c | 25
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 69
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dp.c | 46
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 13
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c | 1
-rw-r--r--  drivers/gpu/drm/tests/drm_connector_test.c | 60
-rw-r--r--  drivers/gpu/drm/tests/drm_kunit_helpers.c | 3
-rw-r--r--  drivers/gpu/drm/v3d/v3d_irq.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.h | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 20
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 5
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_bo.c | 7
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_mocs.c | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.h | 25
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_ccs_mode.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_idle.c | 10
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_mcr.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_mcr.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ads.c | 15
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_oa.c | 1
-rw-r--r--  drivers/hwmon/acpi_power_meter.c | 2
-rw-r--r--  drivers/hwmon/drivetemp.c | 8
-rw-r--r--  drivers/hwmon/ltc2991.c | 2
-rw-r--r--  drivers/hwmon/tmp513.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-rcar.c | 20
-rw-r--r--  drivers/i2c/i2c-atr.c | 2
-rw-r--r--  drivers/i2c/i2c-core-base.c | 1
-rw-r--r--  drivers/i2c/i2c-slave-testunit.c | 19
-rw-r--r--  drivers/i2c/muxes/i2c-demux-pinctrl.c | 6
-rw-r--r--  drivers/iio/adc/ad4695.c | 100
-rw-r--r--  drivers/iio/adc/ad7124.c | 3
-rw-r--r--  drivers/iio/adc/ad7173.c | 10
-rw-r--r--  drivers/iio/adc/ad9467.c | 15
-rw-r--r--  drivers/iio/adc/at91_adc.c | 2
-rw-r--r--  drivers/iio/adc/rockchip_saradc.c | 2
-rw-r--r--  drivers/iio/adc/stm32-dfsdm-adc.c | 13
-rw-r--r--  drivers/iio/adc/ti-ads1119.c | 4
-rw-r--r--  drivers/iio/adc/ti-ads124s08.c | 4
-rw-r--r--  drivers/iio/adc/ti-ads1298.c | 2
-rw-r--r--  drivers/iio/adc/ti-ads8688.c | 2
-rw-r--r--  drivers/iio/dummy/iio_simple_dummy_buffer.c | 2
-rw-r--r--  drivers/iio/gyro/fxas21002c_core.c | 9
-rw-r--r--  drivers/iio/imu/inv_icm42600/inv_icm42600.h | 1
-rw-r--r--  drivers/iio/imu/inv_icm42600/inv_icm42600_core.c | 22
-rw-r--r--  drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c | 3
-rw-r--r--  drivers/iio/imu/kmx61.c | 2
-rw-r--r--  drivers/iio/inkern.c | 2
-rw-r--r--  drivers/iio/light/bh1745.c | 2
-rw-r--r--  drivers/iio/light/vcnl4035.c | 2
-rw-r--r--  drivers/iio/pressure/zpa2326.c | 2
-rw-r--r--  drivers/iio/temperature/tmp006.c | 2
-rw-r--r--  drivers/iio/test/Kconfig | 2
-rw-r--r--  drivers/iio/test/iio-test-rescale.c | 4
-rw-r--r--  drivers/interconnect/icc-clk.c | 10
-rw-r--r--  drivers/interconnect/qcom/icc-rpm.c | 2
-rw-r--r--  drivers/md/dm-ebs-target.c | 2
-rw-r--r--  drivers/md/dm-thin.c | 5
-rw-r--r--  drivers/md/dm-verity-fec.c | 59
-rw-r--r--  drivers/md/persistent-data/dm-array.c | 19
-rw-r--r--  drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c | 4
-rw-r--r--  drivers/mtd/spi-nor/core.c | 2
-rw-r--r--  drivers/net/ethernet/amd/pds_core/devlink.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 19
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 63
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 19
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 96
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 45
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 41
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 51
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dpll.c | 35
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 23
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 263
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wc.c | 24
-rw-r--r--  drivers/net/ethernet/meta/fbnic/Makefile | 1
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic.h | 5
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_fw.h | 7
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c | 81
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_mac.c | 22
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_mac.h | 7
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_pci.c | 3
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/offload.c | 3
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 44
-rw-r--r--  drivers/net/ethernet/realtek/rtase/rtase_main.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c | 14
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 14
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_hw.c | 24
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 6
-rw-r--r--  drivers/net/gtp.c | 26
-rw-r--r--  drivers/net/ieee802154/ca8210.c | 6
-rw-r--r--  drivers/net/mctp/mctp-i3c.c | 4
-rw-r--r--  drivers/net/pcs/pcs-xpcs.c | 4
-rw-r--r--  drivers/net/pfcp.c | 15
-rw-r--r--  drivers/of/address.c | 18
-rw-r--r--  drivers/of/unittest-data/tests-platform.dtsi | 13
-rw-r--r--  drivers/of/unittest.c | 14
-rw-r--r--  drivers/pci/pcie/bwctrl.c | 25
-rw-r--r--  drivers/perf/riscv_pmu_sbi.c | 22
-rw-r--r--  drivers/platform/x86/amd/pmc/pmc.c | 8
-rw-r--r--  drivers/platform/x86/dell/dell-uart-backlight.c | 5
-rw-r--r--  drivers/platform/x86/intel/pmc/core_ssram.c | 4
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 1
-rw-r--r--  drivers/platform/x86/intel/tpmi_power_domains.c | 1
-rw-r--r--  drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c | 5
-rw-r--r--  drivers/pmdomain/imx/imx8mp-blk-ctrl.c | 2
-rw-r--r--  drivers/reset/reset-rzg2l-usbphy-ctrl.c | 1
-rw-r--r--  drivers/scsi/scsi_lib.c | 3
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 4
-rw-r--r--  drivers/staging/gpib/Kconfig | 8
-rw-r--r--  drivers/staging/gpib/agilent_82350b/Makefile | 2
-rw-r--r--  drivers/staging/gpib/agilent_82350b/agilent_82350b.c | 4
-rw-r--r--  drivers/staging/gpib/agilent_82357a/Makefile | 2
-rw-r--r--  drivers/staging/gpib/cb7210/Makefile | 2
-rw-r--r--  drivers/staging/gpib/cb7210/cb7210.c | 12
-rw-r--r--  drivers/staging/gpib/cb7210/cb7210.h | 4
-rw-r--r--  drivers/staging/gpib/cec/Makefile | 2
-rw-r--r--  drivers/staging/gpib/cec/cec_gpib.c | 4
-rw-r--r--  drivers/staging/gpib/common/gpib_os.c | 54
-rw-r--r--  drivers/staging/gpib/eastwood/Makefile | 2
-rw-r--r--  drivers/staging/gpib/eastwood/fluke_gpib.c | 12
-rw-r--r--  drivers/staging/gpib/eastwood/fluke_gpib.h | 4
-rw-r--r--  drivers/staging/gpib/fmh_gpib/fmh_gpib.c | 27
-rw-r--r--  drivers/staging/gpib/fmh_gpib/fmh_gpib.h | 4
-rw-r--r--  drivers/staging/gpib/gpio/Makefile | 2
-rw-r--r--  drivers/staging/gpib/gpio/gpib_bitbang.c | 2
-rw-r--r--  drivers/staging/gpib/hp_82335/Makefile | 2
-rw-r--r--  drivers/staging/gpib/hp_82335/hp82335.c | 21
-rw-r--r--  drivers/staging/gpib/hp_82341/Makefile | 2
-rw-r--r--  drivers/staging/gpib/hp_82341/hp_82341.c | 16
-rw-r--r--  drivers/staging/gpib/include/gpibP.h | 12
-rw-r--r--  drivers/staging/gpib/include/gpib_types.h | 3
-rw-r--r--  drivers/staging/gpib/include/nec7210.h | 5
-rw-r--r--  drivers/staging/gpib/include/tms9914.h | 5
-rw-r--r--  drivers/staging/gpib/ines/Makefile | 2
-rw-r--r--  drivers/staging/gpib/ines/ines.h | 4
-rw-r--r--  drivers/staging/gpib/ines/ines_gpib.c | 22
-rw-r--r--  drivers/staging/gpib/lpvo_usb_gpib/Makefile | 2
-rw-r--r--  drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c | 18
-rw-r--r--  drivers/staging/gpib/nec7210/nec7210.c | 16
-rw-r--r--  drivers/staging/gpib/ni_usb/Makefile | 2
-rw-r--r--  drivers/staging/gpib/pc2/Makefile | 2
-rw-r--r--  drivers/staging/gpib/pc2/pc2_gpib.c | 16
-rw-r--r--  drivers/staging/gpib/tms9914/Makefile | 2
-rw-r--r--  drivers/staging/gpib/tms9914/tms9914.c | 8
-rw-r--r--  drivers/staging/gpib/tnt4882/Makefile | 2
-rw-r--r--  drivers/staging/gpib/tnt4882/mite.c | 69
-rw-r--r--  drivers/staging/gpib/tnt4882/mite.h | 13
-rw-r--r--  drivers/staging/gpib/tnt4882/tnt4882_gpib.c | 67
-rw-r--r--  drivers/staging/iio/frequency/ad9832.c | 2
-rw-r--r--  drivers/staging/iio/frequency/ad9834.c | 2
-rw-r--r--  drivers/thermal/thermal_of.c | 1
-rw-r--r--  drivers/tty/serial/8250/8250_core.c | 3
-rw-r--r--  drivers/tty/serial/imx.c | 4
-rw-r--r--  drivers/tty/serial/stm32-usart.c | 4
-rw-r--r--  drivers/ufs/core/ufshcd-priv.h | 6
-rw-r--r--  drivers/ufs/core/ufshcd.c | 10
-rw-r--r--  drivers/ufs/host/ufs-qcom.c | 31
-rw-r--r--  drivers/ufs/host/ufs-qcom.h | 5
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c | 25
-rw-r--r--  drivers/usb/class/usblp.c | 7
-rw-r--r--  drivers/usb/core/hub.c | 6
-rw-r--r--  drivers/usb/core/port.c | 7
-rw-r--r--  drivers/usb/dwc3/core.h | 1
-rw-r--r--  drivers/usb/dwc3/dwc3-am62.c | 1
-rw-r--r--  drivers/usb/dwc3/gadget.c | 4
-rw-r--r--  drivers/usb/gadget/Kconfig | 4
-rw-r--r--  drivers/usb/gadget/configfs.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_uac2.c | 1
-rw-r--r--  drivers/usb/gadget/function/u_serial.c | 8
-rw-r--r--  drivers/usb/host/xhci-plat.c | 3
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/option.c | 4
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 7
-rw-r--r--  drivers/usb/typec/tcpm/maxim_contaminant.c | 4
-rw-r--r--  drivers/usb/typec/tcpm/tcpci.c | 25
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_ccg.c | 4
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_glink.c | 5
-rw-r--r--  drivers/vfio/pci/vfio_pci_core.c | 17
-rw-r--r--  fs/9p/vfs_addr.c | 6
-rw-r--r--  fs/afs/addr_prefs.c | 6
-rw-r--r--  fs/afs/afs.h | 2
-rw-r--r--  fs/afs/afs_vl.h | 1
-rw-r--r--  fs/afs/vl_alias.c | 8
-rw-r--r--  fs/afs/vlclient.c | 2
-rw-r--r--  fs/afs/write.c | 5
-rw-r--r--  fs/btrfs/ioctl.c | 122
-rw-r--r--  fs/btrfs/scrub.c | 4
-rw-r--r--  fs/btrfs/volumes.c | 4
-rw-r--r--  fs/btrfs/zlib.c | 4
-rw-r--r--  fs/btrfs/zoned.c | 5
-rw-r--r--  fs/cachefiles/daemon.c | 14
-rw-r--r--  fs/cachefiles/internal.h | 3
-rw-r--r--  fs/cachefiles/security.c | 6
-rw-r--r--  fs/debugfs/file.c | 74
-rw-r--r--  fs/debugfs/inode.c | 13
-rw-r--r--  fs/debugfs/internal.h | 6
-rw-r--r--  fs/exfat/dir.c | 3
-rw-r--r--  fs/exfat/fatent.c | 10
-rw-r--r--  fs/exfat/file.c | 6
-rw-r--r--  fs/exfat/namei.c | 4
-rw-r--r--  fs/file.c | 1
-rw-r--r--  fs/fuse/dir.c | 2
-rw-r--r--  fs/fuse/file.c | 31
-rw-r--r--  fs/hfs/super.c | 4
-rw-r--r--  fs/iomap/buffered-io.c | 68
-rw-r--r--  fs/jbd2/commit.c | 4
-rw-r--r--  fs/jbd2/revoke.c | 2
-rw-r--r--  fs/mount.h | 15
-rw-r--r--  fs/namespace.c | 24
-rw-r--r--  fs/netfs/buffered_read.c | 28
-rw-r--r--  fs/netfs/direct_write.c | 8
-rw-r--r--  fs/netfs/read_collect.c | 33
-rw-r--r--  fs/netfs/read_pgpriv2.c | 4
-rw-r--r--  fs/netfs/read_retry.c | 11
-rw-r--r--  fs/netfs/write_collect.c | 14
-rw-r--r--  fs/netfs/write_issue.c | 2
-rw-r--r--  fs/nfs/fscache.c | 9
-rw-r--r--  fs/notify/fdinfo.c | 4
-rw-r--r--  fs/overlayfs/copy_up.c | 16
-rw-r--r--  fs/overlayfs/export.c | 49
-rw-r--r--  fs/overlayfs/namei.c | 4
-rw-r--r--  fs/overlayfs/overlayfs.h | 2
-rw-r--r--  fs/qnx6/inode.c | 11
-rw-r--r--  fs/smb/client/cifssmb.c | 24
-rw-r--r--  fs/smb/client/connect.c | 3
-rw-r--r--  fs/smb/client/namespace.c | 19
-rw-r--r--  fs/smb/client/smb2pdu.c | 9
-rw-r--r--  fs/smb/server/smb2pdu.c | 43
-rw-r--r--  fs/smb/server/smb2pdu.h | 10
-rw-r--r--  fs/smb/server/transport_rdma.c | 3
-rw-r--r--  fs/smb/server/vfs.c | 3
-rw-r--r--  fs/xfs/libxfs/xfs_rtgroup.h | 2
-rw-r--r--  fs/xfs/xfs_dquot.c | 3
-rw-r--r--  include/kvm/arm_pmu.h | 6
-rw-r--r--  include/linux/bus/stm32_firewall_device.h | 2
-rw-r--r--  include/linux/io_uring/cmd.h | 10
-rw-r--r--  include/linux/iomap.h | 2
-rw-r--r--  include/linux/mount.h | 3
-rw-r--r--  include/linux/netfs.h | 7
-rw-r--r--  include/linux/poll.h | 26
-rw-r--r--  include/linux/pruss_driver.h | 12
-rw-r--r--  include/linux/regulator/consumer.h | 77
-rw-r--r--  include/linux/seccomp.h | 2
-rw-r--r--  include/net/busy_poll.h | 8
-rw-r--r--  include/net/inet_connection_sock.h | 2
-rw-r--r--  include/net/page_pool/helpers.h | 2
-rw-r--r--  include/net/sock.h | 17
-rw-r--r--  include/net/xdp.h | 1
-rw-r--r--  include/net/xdp_sock_drv.h | 14
-rw-r--r--  include/ufs/ufshcd.h | 2
-rw-r--r--  io_uring/eventfd.c | 16
-rw-r--r--  io_uring/io_uring.c | 16
-rw-r--r--  io_uring/io_uring.h | 7
-rw-r--r--  io_uring/opdef.c | 3
-rw-r--r--  io_uring/register.c | 52
-rw-r--r--  io_uring/rsrc.c | 10
-rw-r--r--  io_uring/sqpoll.c | 6
-rw-r--r--  io_uring/timeout.c | 4
-rw-r--r--  io_uring/uring_cmd.c | 23
-rw-r--r--  io_uring/uring_cmd.h | 4
-rw-r--r--  kernel/cgroup/cpuset.c | 44
-rw-r--r--  kernel/events/uprobes.c | 2
-rwxr-xr-x  kernel/gen_kheaders.sh | 1
-rw-r--r--  kernel/sched/ext.c | 87
-rw-r--r--  kernel/sched/ext.h | 8
-rw-r--r--  kernel/sched/idle.c | 5
-rw-r--r--  kernel/trace/trace.c | 1
-rw-r--r--  kernel/trace/trace_irqsoff.c | 14
-rw-r--r--  kernel/trace/trace_kprobe.c | 6
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 14
-rw-r--r--  kernel/workqueue.c | 7
-rw-r--r--  net/802/psnap.c | 4
-rw-r--r--  net/bluetooth/hci_sync.c | 11
-rw-r--r--  net/bluetooth/mgmt.c | 38
-rw-r--r--  net/bluetooth/rfcomm/tty.c | 4
-rw-r--r--  net/core/dev.c | 43
-rw-r--r--  net/core/dev.h | 3
-rw-r--r--  net/core/filter.c | 30
-rw-r--r--  net/core/link_watch.c | 10
-rw-r--r--  net/core/netdev-genl-gen.c | 14
-rw-r--r--  net/core/netdev-genl.c | 11
-rw-r--r--  net/core/netpoll.c | 10
-rw-r--r--  net/core/pktgen.c | 6
-rw-r--r--  net/core/xdp.c | 1
-rw-r--r--  net/ipv4/route.c | 1
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv4/udp.c | 46
-rw-r--r--  net/mac802154/iface.c | 4
-rw-r--r--  net/mptcp/ctrl.c | 17
-rw-r--r--  net/mptcp/options.c | 6
-rw-r--r--  net/mptcp/protocol.h | 9
-rw-r--r--  net/ncsi/internal.h | 2
-rw-r--r--  net/ncsi/ncsi-manage.c | 16
-rw-r--r--  net/ncsi/ncsi-rsp.c | 19
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 5
-rw-r--r--  net/netfilter/nf_tables_api.c | 15
-rw-r--r--  net/openvswitch/actions.c | 4
-rw-r--r--  net/rds/tcp.c | 39
-rw-r--r--  net/sched/cls_flow.c | 3
-rw-r--r--  net/sched/sch_cake.c | 140
-rw-r--r--  net/sctp/sysctl.c | 14
-rw-r--r--  net/tls/tls_sw.c | 2
-rw-r--r--  net/vmw_vsock/af_vsock.c | 18
-rw-r--r--  net/vmw_vsock/virtio_transport_common.c | 36
-rw-r--r--  net/vmw_vsock/vsock_bpf.c | 9
-rw-r--r--  net/xdp/xsk.c | 14
-rw-r--r--  security/selinux/avc.c | 61
-rw-r--r--  security/selinux/hooks.c | 6
-rw-r--r--  security/selinux/include/avc.h | 5
-rw-r--r--  security/selinux/include/security.h | 3
-rw-r--r--  security/selinux/ss/services.c | 28
-rw-r--r--  sound/pci/hda/patch_realtek.c | 7
-rw-r--r--  sound/soc/codecs/Kconfig | 1
-rw-r--r--  sound/soc/codecs/cs42l43.c | 1
-rw-r--r--  sound/soc/codecs/es8316.c | 10
-rw-r--r--  sound/soc/codecs/es8326.c | 23
-rw-r--r--  sound/soc/codecs/tas2781-i2c.c | 2
-rw-r--r--  sound/soc/renesas/rcar/adg.c | 28
-rw-r--r--  sound/soc/renesas/rcar/core.c | 4
-rw-r--r--  sound/soc/renesas/rcar/rsnd.h | 2
-rw-r--r--  sound/soc/samsung/Kconfig | 6
-rwxr-xr-x  tools/net/ynl/ynl-gen-c.py | 16
-rwxr-xr-x  tools/testing/selftests/cgroup/test_cpuset_prs.sh | 33
-rwxr-xr-x  tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh | 9
-rw-r--r--  tools/testing/selftests/kvm/aarch64/set_id_regs.c | 1
-rw-r--r--  tools/testing/selftests/kvm/s390x/ucontrol_test.c | 172
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_connect.c | 43
-rw-r--r--  tools/testing/selftests/riscv/abi/pointer_masking.c | 28
-rw-r--r--  tools/testing/selftests/riscv/vector/v_initval_nolibc.c | 4
-rw-r--r--  tools/testing/selftests/riscv/vector/vstate_prctl.c | 2
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/filters/flow.json | 4
469 files changed, 4230 insertions, 2546 deletions
diff --git a/.mailmap b/.mailmap
index 17f491cebc8c..399322897938 100644
--- a/.mailmap
+++ b/.mailmap
@@ -121,6 +121,8 @@ Ben Widawsky <bwidawsk@kernel.org> <benjamin.widawsky@intel.com>
Benjamin Poirier <benjamin.poirier@gmail.com> <bpoirier@suse.de>
Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@gmail.com>
Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@redhat.com>
+Bingwu Zhang <xtex@aosc.io> <xtexchooser@duck.com>
+Bingwu Zhang <xtex@aosc.io> <xtex@xtexx.eu.org>
Bjorn Andersson <andersson@kernel.org> <bjorn@kryo.se>
Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@linaro.org>
Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@sonymobile.com>
diff --git a/CREDITS b/CREDITS
index b1777b53c63a..cda68f04d5f1 100644
--- a/CREDITS
+++ b/CREDITS
@@ -20,6 +20,10 @@ N: Thomas Abraham
E: thomas.ab@samsung.com
D: Samsung pin controller driver
+N: Jose Abreu
+E: jose.abreu@synopsys.com
+D: Synopsys DesignWare XPCS MDIO/PCS driver.
+
N: Dragos Acostachioaie
E: dragos@iname.com
W: http://www.arbornet.org/~dragos
@@ -1428,6 +1432,10 @@ S: 8124 Constitution Apt. 7
S: Sterling Heights, Michigan 48313
S: USA
+N: Andy Gospodarek
+E: andy@greyhouse.net
+D: Maintenance and contributions to the network interface bonding driver.
+
N: Wolfgang Grandegger
E: wg@grandegger.com
D: Controller Area Network (device drivers)
@@ -1812,6 +1820,10 @@ D: Author/maintainer of most DRM drivers (especially ATI, MGA)
D: Core DRM templates, general DRM and 3D-related hacking
S: No fixed address
+N: Woojung Huh
+E: woojung.huh@microchip.com
+D: Microchip LAN78XX USB Ethernet driver
+
N: Kenn Humborg
E: kenn@wombat.ie
D: Mods to loop device to support sparse backing files
diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst
index 19754beb5a4e..eb58d7a5affd 100644
--- a/Documentation/admin-guide/pm/cpuidle.rst
+++ b/Documentation/admin-guide/pm/cpuidle.rst
@@ -269,27 +269,7 @@ Namely, when invoked to select an idle state for a CPU (i.e. an idle state that
the CPU will ask the processor hardware to enter), it attempts to predict the
idle duration and uses the predicted value for idle state selection.
-It first obtains the time until the closest timer event with the assumption
-that the scheduler tick will be stopped. That time, referred to as the *sleep
-length* in what follows, is the upper bound on the time before the next CPU
-wakeup. It is used to determine the sleep length range, which in turn is needed
-to get the sleep length correction factor.
-
-The ``menu`` governor maintains two arrays of sleep length correction factors.
-One of them is used when tasks previously running on the given CPU are waiting
-for some I/O operations to complete and the other one is used when that is not
-the case. Each array contains several correction factor values that correspond
-to different sleep length ranges organized so that each range represented in the
-array is approximately 10 times wider than the previous one.
-
-The correction factor for the given sleep length range (determined before
-selecting the idle state for the CPU) is updated after the CPU has been woken
-up and the closer the sleep length is to the observed idle duration, the closer
-to 1 the correction factor becomes (it must fall between 0 and 1 inclusive).
-The sleep length is multiplied by the correction factor for the range that it
-falls into to obtain the first approximation of the predicted idle duration.
-
-Next, the governor uses a simple pattern recognition algorithm to refine its
+It first uses a simple pattern recognition algorithm to obtain a preliminary
idle duration prediction. Namely, it saves the last 8 observed idle duration
values and, when predicting the idle duration next time, it computes the average
and variance of them. If the variance is small (smaller than 400 square
@@ -301,29 +281,39 @@ Again, if the variance of them is small (in the above sense), the average is
taken as the "typical interval" value and so on, until either the "typical
interval" is determined or too many data points are disregarded, in which case
the "typical interval" is assumed to equal "infinity" (the maximum unsigned
-integer value). The "typical interval" computed this way is compared with the
-sleep length multiplied by the correction factor and the minimum of the two is
-taken as the predicted idle duration.
-
-Then, the governor computes an extra latency limit to help "interactive"
-workloads. It uses the observation that if the exit latency of the selected
-idle state is comparable with the predicted idle duration, the total time spent
-in that state probably will be very short and the amount of energy to save by
-entering it will be relatively small, so likely it is better to avoid the
-overhead related to entering that state and exiting it. Thus selecting a
-shallower state is likely to be a better option then. The first approximation
-of the extra latency limit is the predicted idle duration itself which
-additionally is divided by a value depending on the number of tasks that
-previously ran on the given CPU and now they are waiting for I/O operations to
-complete. The result of that division is compared with the latency limit coming
-from the power management quality of service, or `PM QoS <cpu-pm-qos_>`_,
-framework and the minimum of the two is taken as the limit for the idle states'
-exit latency.
+integer value).
+
+If the "typical interval" computed this way is long enough, the governor obtains
+the time until the closest timer event with the assumption that the scheduler
+tick will be stopped. That time, referred to as the *sleep length* in what follows,
+is the upper bound on the time before the next CPU wakeup. It is used to determine
+the sleep length range, which in turn is needed to get the sleep length correction
+factor.
+
+The ``menu`` governor maintains an array containing several correction factor
+values that correspond to different sleep length ranges organized so that each
+range represented in the array is approximately 10 times wider than the previous
+one.
+
+The correction factor for the given sleep length range (determined before
+selecting the idle state for the CPU) is updated after the CPU has been woken
+up and the closer the sleep length is to the observed idle duration, the closer
+to 1 the correction factor becomes (it must fall between 0 and 1 inclusive).
+The sleep length is multiplied by the correction factor for the range that it
+falls into to obtain an approximation of the predicted idle duration that is
+compared to the "typical interval" determined previously and the minimum of
+the two is taken as the idle duration prediction.
+
+If the "typical interval" value is small, which means that the CPU is likely
+to be woken up soon enough, the sleep length computation is skipped as it may
+be costly and the idle duration is simply predicted to equal the "typical
+interval" value.
Now, the governor is ready to walk the list of idle states and choose one of
them. For this purpose, it compares the target residency of each state with
-the predicted idle duration and the exit latency of it with the computed latency
-limit. It selects the state with the target residency closest to the predicted
+the predicted idle duration and the exit latency of it with the latency
+limit coming from the power management quality of service, or `PM QoS <cpu-pm-qos_>`_,
+framework. It selects the state with the target residency closest to the predicted
idle duration, but still below it, and exit latency that does not exceed the
limit.
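
As a rough illustration of the "typical interval" step described in the text
added above, the following userspace sketch averages the last eight observed
idle durations and drops outliers until the variance is small. It is only an
illustration with assumed names and thresholds, not the ``menu`` governor's
actual code (which lives in drivers/cpuidle/governors/menu.c and differs in
data sources and fixed-point details)::

  #include <stdint.h>

  #define INTERVALS 8

  /*
   * Idle durations are in microseconds and are assumed to stay far below
   * UINT32_MAX, so the 64-bit math below cannot overflow.  Returns
   * UINT32_MAX ("infinity") if no typical interval can be found.
   */
  static uint32_t typical_interval(const uint32_t us[INTERVALS])
  {
          uint32_t limit = UINT32_MAX;

          for (;;) {
                  uint64_t sum = 0, sq_sum = 0;
                  uint32_t pass_max = 0;
                  int count = 0;

                  for (int i = 0; i < INTERVALS; i++) {
                          if (us[i] > limit)
                                  continue;       /* previously discarded outlier */
                          sum += us[i];
                          sq_sum += (uint64_t)us[i] * us[i];
                          if (us[i] > pass_max)
                                  pass_max = us[i];
                          count++;
                  }

                  /* Too many data points disregarded: no typical interval. */
                  if (count <= INTERVALS / 2)
                          return UINT32_MAX;

                  uint64_t avg = sum / count;
                  uint64_t var = sq_sum / count - avg * avg;

                  /* "Small" variance: standard deviation within 1/6 of the average. */
                  if (var * 36 <= avg * avg)
                          return (uint32_t)avg;

                  /* Otherwise discard the longest observed value and retry. */
                  limit = pass_max - 1;
          }
  }
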
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dp.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,dp.yaml
index 2aef1eb32e11..75ce92f4a5fd 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dp.yaml
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dp.yaml
@@ -42,6 +42,9 @@ properties:
interrupts:
maxItems: 1
+ '#sound-dai-cells':
+ const: 0
+
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
@@ -85,7 +88,21 @@ required:
- ports
- max-linkrate-mhz
-additionalProperties: false
+allOf:
+ - $ref: /schemas/sound/dai-common.yaml#
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - mediatek,mt8188-dp-tx
+ - mediatek,mt8195-dp-tx
+ then:
+ properties:
+ '#sound-dai-cells': false
+
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/iio/st,st-sensors.yaml b/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
index 71c1ee33a393..e955eb8e8797 100644
--- a/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
+++ b/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
@@ -65,6 +65,7 @@ properties:
- st,lsm9ds0-gyro
- description: STMicroelectronics Magnetometers
enum:
+ - st,iis2mdc
- st,lis2mdl
- st,lis3mdl-magn
- st,lsm303agr-magn
diff --git a/Documentation/devicetree/bindings/net/pse-pd/pse-controller.yaml b/Documentation/devicetree/bindings/net/pse-pd/pse-controller.yaml
index a12cda8aa764..cd09560e0aea 100644
--- a/Documentation/devicetree/bindings/net/pse-pd/pse-controller.yaml
+++ b/Documentation/devicetree/bindings/net/pse-pd/pse-controller.yaml
@@ -81,7 +81,7 @@ properties:
List of phandles, each pointing to the power supply for the
corresponding pairset named in 'pairset-names'. This property
aligns with IEEE 802.3-2022, Section 33.2.3 and 145.2.4.
- PSE Pinout Alternatives (as per IEEE 802.3-2022 Table 145\u20133)
+ PSE Pinout Alternatives (as per IEEE 802.3-2022 Table 145-3)
|-----------|---------------|---------------|---------------|---------------|
| Conductor | Alternative A | Alternative A | Alternative B | Alternative B |
| | (MDI-X) | (MDI) | (X) | (S) |
diff --git a/Documentation/sound/codecs/cs35l56.rst b/Documentation/sound/codecs/cs35l56.rst
new file mode 100644
index 000000000000..98c6f6c74394
--- /dev/null
+++ b/Documentation/sound/codecs/cs35l56.rst
@@ -0,0 +1,292 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+=====================================================================
+Audio drivers for Cirrus Logic CS35L54/56/57 Boosted Smart Amplifiers
+=====================================================================
+:Copyright: 2025 Cirrus Logic, Inc. and
+ Cirrus Logic International Semiconductor Ltd.
+
+Contact: patches@opensource.cirrus.com
+
+Summary
+=======
+
+The high-level summary of this document is:
+
+**If you have a laptop that uses CS35L54/56/57 amplifiers but audio is not
+working, DO NOT ATTEMPT TO USE FIRMWARE AND SETTINGS FROM ANOTHER LAPTOP,
+EVEN IF THAT LAPTOP SEEMS SIMILAR.**
+
+The CS35L54/56/57 amplifiers must be correctly configured for the power
+supply voltage, speaker impedance, maximum speaker voltage/current, and
+other external hardware connections.
+
+The amplifiers feature advanced boost technology that increases the voltage
+used to drive the speakers, while proprietary speaker protection algorithms
+allow these boosted amplifiers to push the limits of the speakers without
+causing damage. These **must** be configured correctly.
+
+Supported Cirrus Logic amplifiers
+---------------------------------
+
+The cs35l56 drivers support:
+
+* CS35L54
+* CS35L56
+* CS35L57
+
+There are two drivers in the kernel
+
+*For systems using SoundWire*: sound/soc/codecs/cs35l56.c and associated files
+
+*For systems using HDA*: sound/pci/hda/cs35l56_hda.c
+
+Firmware
+========
+
+The amplifier is controlled and managed by firmware running on the internal
+DSP. Firmware files are essential to enable the full capabilities of the
+amplifier.
+
+Firmware is distributed in the linux-firmware repository:
+https://gitlab.com/kernel-firmware/linux-firmware.git
+
+On most SoundWire systems the amplifier has a default minimum capability to
+produce audio. However this will be
+
+* at low volume, to protect the speakers, since the speaker specifications
+ and power supply voltages are unknown.
+* a mono mix of left and right channels.
+
+On some SoundWire systems that have both CS42L43 and CS35L56/57 the CS35L56/57
+receive their audio from the CS42L43 instead of directly from the host
+SoundWire interface. These systems can be identified by the CS42L43 showing
+in dmesg as a SoundWire device, but the CS35L56/57 as SPI. On these systems
+the firmware is *mandatory* to enable receiving the audio from the CS42L43.
+
+On HDA systems the firmware is *mandatory* to enable HDA bridge mode. There
+will not be any audio from the amplifiers without firmware.
+
+Cirrus Logic firmware files
+---------------------------
+
+Each amplifier requires two firmware files. One file has a .wmfw suffix, the
+other has a .bin suffix.
+
+The firmware is customized by the OEM to match the hardware of each laptop,
+and the firmware is specific to that laptop. Because of this, there are many
+firmware files in linux-firmware for these amplifiers. Firmware files are
+**not interchangeable between laptops**.
+
+Cirrus Logic submits files for known laptops to the upstream linux-firmware
+repository. Providing Cirrus Logic is aware of a particular laptop and has
+permission from the manufacturer to publish the firmware, it will be pushed
+to linux-firmware. You may need to upgrade to a newer release of
+linux-firmware to obtain the firmware for your laptop.
+
+**Important:** the Makefile for linux-firmware creates symlinks that are listed
+in the WHENCE file. These symlinks are required for the CS35L56 driver to be
+able to load the firmware.
+
+How do I know which firmware file I should have?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+All firmware file names are qualified with a unique "system ID". On normal
+x86 PCs with PCI audio this is the Vendor Subsystem ID (SSID) of the host
+PCI audio interface.
+
+The SSID can be viewed using the lspci tool::
+
+ lspci -v -nn | grep -A2 -i audio
+ 0000:00:1f.3 Audio device [0403]: Intel Corporation Meteor Lake-P HD Audio Controller [8086:7e28]
+ Subsystem: Dell Meteor Lake-P HD Audio Controller [1028:0c63]
+
+In this example the SSID is 10280c63.
+
+The format of the firmware file names is:
+
+ cs35lxx-b0-dsp1-misc-SSID[-spkidX]-ampN
+
+Where:
+
+ * cs35lxx-b0 is the amplifier model and silicon revision. This information
+ is logged by the driver during initialization.
+ * SSID is the 8-digit hexadecimal SSID value.
+ * ampN is the amplifier number (for example amp1). This is the same as
+ the prefix on the ALSA control names except that it is always lower-case
+ in the file name.
+ * spkidX is an optional part, used for laptops that have firmware
+ configurations for different makes and models of internal speakers.
+
+Sound Open Firmware and ALSA topology files
+-------------------------------------------
+
+All SoundWire systems will require a Sound Open Firmware (SOF) for the
+host CPU audio DSP, together with an ALSA topology file (.tplg).
+
+The SOF firmware will usually be provided by the manufacturer of the host
+CPU (i.e. Intel or AMD). The .tplg file is normally part of the SOF firmware
+release.
+
+SOF binary builds are available from: https://github.com/thesofproject/sof-bin/releases
+
+The main SOF source is here: https://github.com/thesofproject
+
+ALSA-ucm configurations
+-----------------------
+Typically an appropriate ALSA-ucm configuration file is needed for
+use-case managers and audio servers such as PipeWire.
+
+Configuration files are available from the alsa-ucm-conf repository:
+https://git.alsa-project.org/?p=alsa-ucm-conf.git
+
+Kernel log messages
+===================
+
+SoundWire
+---------
+A successful initialization will look like this (this will be repeated for
+each amplifier)::
+
+ [ 7.568374] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_P not found, using dummy regulator
+ [ 7.605208] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_IO not found, using dummy regulator
+ [ 7.605313] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_A not found, using dummy regulator
+ [ 7.939279] cs35l56 sdw:0:0:01fa:3556:01:0: Cirrus Logic CS35L56 Rev B0 OTP3 fw:3.4.4 (patched=0)
+ [ 7.947844] cs35l56 sdw:0:0:01fa:3556:01:0: Slave 4 state check1: UNATTACHED, status was 1
+ [ 8.740280] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_B not found, using dummy regulator
+ [ 8.740552] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_AMP not found, using dummy regulator
+ [ 9.242164] cs35l56 sdw:0:0:01fa:3556:01:0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw: format 3 timestamp 0x66b2b872
+ [ 9.242173] cs35l56 sdw:0:0:01fa:3556:01:0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw: Tue 05 Dec 2023 21:37:21 GMT Standard Time
+ [ 9.991709] cs35l56 sdw:0:0:01fa:3556:01:0: DSP1: Firmware: 1a00d6 vendor: 0x2 v3.11.23, 41 algorithms
+ [10.039098] cs35l56 sdw:0:0:01fa:3556:01:0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx-amp1.bin: v3.11.23
+ [10.879235] cs35l56 sdw:0:0:01fa:3556:01:0: Slave 4 state check1: UNATTACHED, status was 1
+ [11.401536] cs35l56 sdw:0:0:01fa:3556:01:0: Calibration applied
+
+HDA
+---
+A successful initialization will look like this (this will be repeated for
+each amplifier)::
+
+ [ 6.306475] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: Cirrus Logic CS35L56 Rev B0 OTP3 fw:3.4.4 (patched=0)
+ [ 6.613892] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP system name: 'xxxxxxxx', amp name: 'AMP1'
+ [ 8.266660] snd_hda_codec_cs8409 ehdaudio0D0: bound i2c-CSC3556:00-cs35l56-hda.0 (ops cs35l56_hda_comp_ops [snd_hda_scodec_cs35l56])
+ [ 8.287525] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw: format 3 timestamp 0x66b2b872
+ [ 8.287528] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw: Tue 05 Dec 2023 21:37:21 GMT Standard Time
+ [ 9.984335] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP1: Firmware: 1a00d6 vendor: 0x2 v3.11.23, 41 algorithms
+ [10.085797] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx-amp1.bin: v3.11.23
+ [10.655237] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: Calibration applied
+
+Important messages
+~~~~~~~~~~~~~~~~~~
+Cirrus Logic CS35L56 Rev B0 OTP3 fw:3.4.4 (patched=0)
+ Shows that the driver has been able to read device ID registers from the
+ amplifier.
+
+ * The actual amplifier type and silicon revision (CS35L56 B0 in this
+ example) is shown, as read from the amplifier identification registers.
+ * (patched=0) is normal, and indicates that the amplifier has been hard
+ reset and is running default ROM firmware.
+ * (patched=1) means that something has previously downloaded firmware
+ to the amplifier and the driver does not have control of the RESET
+ signal to be able to replace this preloaded firmware. This is normal
+ for systems where the BIOS downloads firmware to the amplifiers
+ before OS boot.
+ This status can also be seen if the cs35l56 kernel module is unloaded
+ and reloaded on a system where the driver does not have control of
+ RESET. SoundWire systems typically do not give the driver control of
+ RESET and only a BIOS (re)boot can reset the amplifiers.
+
+DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw
+ Shows that a .wmfw firmware file was found and downloaded.
+
+DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx-amp1.bin
+ Shows that a .bin firmware file was found and downloaded.
+
+Calibration applied
+ Factory calibration data in EFI was written to the amplifier.
+
+Error messages
+==============
+This section explains some of the error messages that the driver can log.
+
+Algorithm coefficient version %d.%d.%d but expected %d.%d.%d
+ The version of the .bin file content does not match the loaded firmware.
+ Caused by mismatched .wmfw and .bin file, or .bin file was found but
+ .wmfw was not.
+
+No %s for algorithm %x
+ The version of the .bin file content does not match the loaded firmware.
+ Caused by mismatched .wmfw and .bin file, or .bin file was found but
+ .wmfw was not.
+
+.bin file required but not found
+ HDA driver did not find a .bin file that matches this hardware.
+
+Calibration disabled due to missing firmware controls
+ Driver was not able to write EFI calibration data to firmware registers.
+ This typically means that either:
+
+ * The driver did not find a suitable wmfw for this hardware, or
+ * The amplifier has already been patched with firmware by something
+ previously, and the driver does not have control of a hard RESET line
+ to be able to reset the amplifier and download the firmware files it
+ found. This situation is indicated by the device identification
+ string in the kernel log showing "(patched=1)".
+
+Failed to write calibration
+ Same meaning and cause as "Calibration disabled due to missing firmware
+ controls"
+
+Failed to read calibration data from EFI
+ Factory calibration data in EFI is missing, empty or corrupt.
+ This is most likely to be caused by accidentally deleting the file from
+ the EFI filesystem.
+
+No calibration for silicon ID
+ The factory calibration data in EFI does not match this hardware.
+ The most likely cause is that an amplifier has been replaced on the
+ motherboard without going through manufacturer calibration process to
+ generate calibration data for the new amplifier.
+
+Did not find any buses for CSCxxxx
+ Only on HDA systems. The HDA codec driver found an ACPI entry for
+ Cirrus Logic companion amps, but could not enumerate the ACPI entries for
+ the I2C/SPI buses. The most likely cause of this is that:
+
+ * The relevant bus driver (I2C or SPI) is not part of the kernel.
+ * The HDA codec driver was built-in to the kernel but the I2C/SPI
+ bus driver is a module and so the HDA codec driver cannot call the
+ bus driver functions.
+
+init_completion timed out
+ The SoundWire bus controller (host end) did not enumerate the amplifier.
+ In other words, the ACPI says there is an amplifier but for some reason
+ it was not detected on the bus.
+
+No AF01 node
+ Indicates an error in ACPI. A SoundWire system should have a Device()
+ node named "AF01" but it was not found.
+
+Failed to get spk-id-gpios
+ ACPI says that the driver should request a GPIO but the driver was not
+ able to get that GPIO. The most likely cause is that the kernel does not
+ include the correct GPIO or PINCTRL driver for this system.
+
+Failed to read spk-id
+ ACPI says that the driver should request a GPIO but the driver was not
+ able to read that GPIO.
+
+Unexpected spk-id element count
+ AF01 contains more speaker ID GPIO entries than the driver supports
+
+Overtemp error
+ Amplifier overheat protection was triggered and the amplifier shut down
+ to protect itself.
+
+Amp short error
+ Amplifier detected a short-circuit on the speaker output pins and shut
+ down for protection. This would normally indicate a damaged speaker.
+
+Hibernate wake failed
+ The driver tried to wake the amplifier from its power-saving state but
+ did not see the expected responses from the amplifier. This can be caused
+ by using firmware that does not match the hardware.
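
The qualified firmware file name format documented in the new file above can
be illustrated with a small sketch. The helper name and its parameters are
invented for illustration only and are not part of the driver::

  #include <stdio.h>

  /*
   * Hypothetical example: compose a .bin file name from the pieces the
   * document describes (silicon revision "b0", 8-digit hex SSID, optional
   * speaker ID, amplifier number).
   */
  static void example_cs35l56_bin_name(char *buf, size_t len,
                                       unsigned int ssid, int spk_id, int amp)
  {
          if (spk_id >= 0)
                  snprintf(buf, len,
                           "cirrus/cs35l56-b0-dsp1-misc-%08x-spkid%d-amp%d.bin",
                           ssid, spk_id, amp);
          else
                  snprintf(buf, len,
                           "cirrus/cs35l56-b0-dsp1-misc-%08x-amp%d.bin",
                           ssid, amp);
  }

  /*
   * example_cs35l56_bin_name(buf, sizeof(buf), 0x10280c63, -1, 1) yields
   * "cirrus/cs35l56-b0-dsp1-misc-10280c63-amp1.bin", matching the shape of
   * the .bin file names shown in the kernel log excerpts earlier.
   */
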
diff --git a/Documentation/sound/codecs/index.rst b/Documentation/sound/codecs/index.rst
new file mode 100644
index 000000000000..2cb95d87bbef
--- /dev/null
+++ b/Documentation/sound/codecs/index.rst
@@ -0,0 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Codec-Specific Information
+==========================
+
+.. toctree::
+ :maxdepth: 2
+
+ cs35l56
diff --git a/Documentation/sound/index.rst b/Documentation/sound/index.rst
index c437f2a4bc85..51cd736f65b5 100644
--- a/Documentation/sound/index.rst
+++ b/Documentation/sound/index.rst
@@ -13,6 +13,7 @@ Sound Subsystem Documentation
alsa-configuration
hd-audio/index
cards/index
+ codecs/index
utimers
.. only:: subproject and html
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index 272464bb7c60..2b74f96d09d5 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -810,6 +810,12 @@ Here is the list of current tracers that may be configured.
to draw a graph of function calls similar to C code
source.
+ Note that the function graph calculates the timings of when the
+ function starts and returns internally and for each instance. If
+ there are two instances that run the function graph tracer and trace
+ the same functions, the length of the timings may be slightly off as
+ each reads the timestamp separately and not at the same time.
+
"blk"
The block tracer. The tracer used by the blktrace user
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 454c2aaa155e..f15b61317aad 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -1914,6 +1914,9 @@ No flags are specified so far, the corresponding field must be set to zero.
#define KVM_IRQ_ROUTING_HV_SINT 4
#define KVM_IRQ_ROUTING_XEN_EVTCHN 5
+On s390, adding a KVM_IRQ_ROUTING_S390_ADAPTER is rejected on ucontrol VMs with
+error -EINVAL.
+
flags:
- KVM_MSI_VALID_DEVID: used along with KVM_IRQ_ROUTING_MSI routing entry
diff --git a/Documentation/virt/kvm/devices/s390_flic.rst b/Documentation/virt/kvm/devices/s390_flic.rst
index ea96559ba501..b784f8016748 100644
--- a/Documentation/virt/kvm/devices/s390_flic.rst
+++ b/Documentation/virt/kvm/devices/s390_flic.rst
@@ -58,11 +58,15 @@ Groups:
Enables async page faults for the guest. So in case of a major page fault
the host is allowed to handle this async and continues the guest.
+ -EINVAL is returned when called on the FLIC of a ucontrol VM.
+
KVM_DEV_FLIC_APF_DISABLE_WAIT
Disables async page faults for the guest and waits until already pending
async page faults are done. This is necessary to trigger a completion interrupt
for every init interrupt before migrating the interrupt list.
+ -EINVAL is returned when called on the FLIC of a ucontrol VM.
+
KVM_DEV_FLIC_ADAPTER_REGISTER
Register an I/O adapter interrupt source. Takes a kvm_s390_io_adapter
describing the adapter to register::
diff --git a/MAINTAINERS b/MAINTAINERS
index 30cbc3d44cd5..0fa7c5728f1e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -949,7 +949,6 @@ AMAZON ETHERNET DRIVERS
M: Shay Agroskin <shayagr@amazon.com>
M: Arthur Kiyanovski <akiyano@amazon.com>
R: David Arinzon <darinzon@amazon.com>
-R: Noam Dagan <ndagan@amazon.com>
R: Saeed Bishara <saeedb@amazon.com>
L: netdev@vger.kernel.org
S: Supported
@@ -2690,7 +2689,6 @@ N: at91
N: atmel
ARM/Microchip Sparx5 SoC support
-M: Lars Povlsen <lars.povlsen@microchip.com>
M: Steen Hegelund <Steen.Hegelund@microchip.com>
M: Daniel Machon <daniel.machon@microchip.com>
M: UNGLinuxDriver@microchip.com
@@ -4058,7 +4056,6 @@ F: net/bluetooth/
BONDING DRIVER
M: Jay Vosburgh <jv@jvosburgh.net>
-M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/bonding.rst
@@ -4131,7 +4128,6 @@ S: Odd Fixes
F: drivers/net/ethernet/netronome/nfp/bpf/
BPF JIT for POWERPC (32-BIT AND 64-BIT)
-M: Michael Ellerman <mpe@ellerman.id.au>
M: Hari Bathini <hbathini@linux.ibm.com>
M: Christophe Leroy <christophe.leroy@csgroup.eu>
R: Naveen N Rao <naveen@kernel.org>
@@ -5469,6 +5465,7 @@ L: linux-sound@vger.kernel.org
L: patches@opensource.cirrus.com
S: Maintained
F: Documentation/devicetree/bindings/sound/cirrus,cs*
+F: Documentation/sound/codecs/cs*
F: drivers/mfd/cs42l43*
F: drivers/pinctrl/cirrus/pinctrl-cs42l43*
F: drivers/spi/spi-cs42l43*
@@ -12632,7 +12629,7 @@ F: arch/mips/include/uapi/asm/kvm*
F: arch/mips/kvm/
KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc)
-M: Michael Ellerman <mpe@ellerman.id.au>
+M: Madhavan Srinivasan <maddy@linux.ibm.com>
R: Nicholas Piggin <npiggin@gmail.com>
L: linuxppc-dev@lists.ozlabs.org
L: kvm@vger.kernel.org
@@ -13211,11 +13208,11 @@ X: drivers/macintosh/adb-iop.c
X: drivers/macintosh/via-macii.c
LINUX FOR POWERPC (32-BIT AND 64-BIT)
+M: Madhavan Srinivasan <maddy@linux.ibm.com>
M: Michael Ellerman <mpe@ellerman.id.au>
R: Nicholas Piggin <npiggin@gmail.com>
R: Christophe Leroy <christophe.leroy@csgroup.eu>
R: Naveen N Rao <naveen@kernel.org>
-M: Madhavan Srinivasan <maddy@linux.ibm.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
W: https://github.com/linuxppc/wiki/wiki
@@ -14566,7 +14563,6 @@ F: drivers/dma/mediatek/
MEDIATEK ETHERNET DRIVER
M: Felix Fietkau <nbd@nbd.name>
M: Sean Wang <sean.wang@mediatek.com>
-M: Mark Lee <Mark-MC.Lee@mediatek.com>
M: Lorenzo Bianconi <lorenzo@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
@@ -21993,6 +21989,7 @@ W: https://github.com/thesofproject/linux/
F: sound/soc/sof/
SOUND - GENERIC SOUND CARD (Simple-Audio-Card, Audio-Graph-Card)
+M: Mark Brown <broonie@kernel.org>
M: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
S: Supported
L: linux-sound@vger.kernel.org
@@ -22503,11 +22500,8 @@ F: Documentation/devicetree/bindings/phy/st,stm32mp25-combophy.yaml
F: drivers/phy/st/phy-stm32-combophy.c
STMMAC ETHERNET DRIVER
-M: Alexandre Torgue <alexandre.torgue@foss.st.com>
-M: Jose Abreu <joabreu@synopsys.com>
L: netdev@vger.kernel.org
-S: Supported
-W: http://www.stlinux.com
+S: Orphan
F: Documentation/networking/device_drivers/ethernet/stmicro/
F: drivers/net/ethernet/stmicro/stmmac/
@@ -22739,9 +22733,8 @@ S: Supported
F: drivers/net/ethernet/synopsys/
SYNOPSYS DESIGNWARE ETHERNET XPCS DRIVER
-M: Jose Abreu <Jose.Abreu@synopsys.com>
L: netdev@vger.kernel.org
-S: Supported
+S: Orphan
F: drivers/net/pcs/pcs-xpcs.c
F: drivers/net/pcs/pcs-xpcs.h
F: include/linux/pcs/pcs-xpcs.h
@@ -23649,7 +23642,6 @@ F: tools/testing/selftests/timers/
TIPC NETWORK LAYER
M: Jon Maloy <jmaloy@redhat.com>
-M: Ying Xue <ying.xue@windriver.com>
L: netdev@vger.kernel.org (core kernel code)
L: tipc-discussion@lists.sourceforge.net (user apps, general discussion)
S: Maintained
@@ -24255,7 +24247,8 @@ F: Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
F: drivers/usb/isp1760/*
USB LAN78XX ETHERNET DRIVER
-M: Woojung Huh <woojung.huh@microchip.com>
+M: Thangaraj Samynathan <Thangaraj.S@microchip.com>
+M: Rengarajan Sundararajan <Rengarajan.S@microchip.com>
M: UNGLinuxDriver@microchip.com
L: netdev@vger.kernel.org
S: Maintained
diff --git a/Makefile b/Makefile
index 7904d5d88088..e20a62ad397f 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/nxp/imx/imxrt1050.dtsi b/arch/arm/boot/dts/nxp/imx/imxrt1050.dtsi
index dd714d235d5f..b0bad0d1ba36 100644
--- a/arch/arm/boot/dts/nxp/imx/imxrt1050.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imxrt1050.dtsi
@@ -87,7 +87,7 @@
reg = <0x402c0000 0x4000>;
interrupts = <110>;
clocks = <&clks IMXRT1050_CLK_IPG_PDOF>,
- <&clks IMXRT1050_CLK_OSC>,
+ <&clks IMXRT1050_CLK_AHB_PODF>,
<&clks IMXRT1050_CLK_USDHC1>;
clock-names = "ipg", "ahb", "per";
bus-width = <4>;
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 0beecdde55f5..f25eadcba5e6 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -323,6 +323,7 @@ CONFIG_SND_SOC_IMX_SGTL5000=y
CONFIG_SND_SOC_FSL_ASOC_CARD=y
CONFIG_SND_SOC_AC97_CODEC=y
CONFIG_SND_SOC_CS42XX8_I2C=y
+CONFIG_SND_SOC_SPDIF=y
CONFIG_SND_SOC_TLV320AIC3X_I2C=y
CONFIG_SND_SOC_WM8960=y
CONFIG_SND_SOC_WM8962=y
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi
index a60ebb718789..c32a6947ae9c 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi
@@ -165,7 +165,7 @@ audio_subsys: bus@59000000 {
};
esai0: esai@59010000 {
- compatible = "fsl,imx8qm-esai";
+ compatible = "fsl,imx8qm-esai", "fsl,imx6ull-esai";
reg = <0x59010000 0x10000>;
interrupts = <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&esai0_lpcg IMX_LPCG_CLK_4>,
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-audio.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-audio.dtsi
index e24e639b98ee..c9b55f02497a 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-ss-audio.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-audio.dtsi
@@ -134,7 +134,7 @@
};
esai1: esai@59810000 {
- compatible = "fsl,imx8qm-esai";
+ compatible = "fsl,imx8qm-esai", "fsl,imx6ull-esai";
reg = <0x59810000 0x10000>;
interrupts = <GIC_SPI 411 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&esai1_lpcg IMX_LPCG_CLK_0>,
diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi
index d10f62eacfe0..e9c7a8265d71 100644
--- a/arch/arm64/boot/dts/freescale/imx95.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx95.dtsi
@@ -1673,7 +1673,7 @@
netcmix_blk_ctrl: syscon@4c810000 {
compatible = "nxp,imx95-netcmix-blk-ctrl", "syscon";
- reg = <0x0 0x4c810000 0x0 0x10000>;
+ reg = <0x0 0x4c810000 0x0 0x8>;
#clock-cells = <1>;
clocks = <&scmi_clk IMX95_CLK_BUSNETCMIX>;
assigned-clocks = <&scmi_clk IMX95_CLK_BUSNETCMIX>;
diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
index 9f315a51a7c1..9da62d7c4d27 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
@@ -2440,6 +2440,7 @@
qcom,cmb-element-bits = <32>;
qcom,cmb-msrs-num = <32>;
+ status = "disabled";
out-ports {
port {
@@ -6092,7 +6093,7 @@
<0x0 0x40000000 0x0 0xf20>,
<0x0 0x40000f20 0x0 0xa8>,
<0x0 0x40001000 0x0 0x4000>,
- <0x0 0x40200000 0x0 0x100000>,
+ <0x0 0x40200000 0x0 0x1fe00000>,
<0x0 0x01c03000 0x0 0x1000>,
<0x0 0x40005000 0x0 0x2000>;
reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
@@ -6250,7 +6251,7 @@
<0x0 0x60000000 0x0 0xf20>,
<0x0 0x60000f20 0x0 0xa8>,
<0x0 0x60001000 0x0 0x4000>,
- <0x0 0x60200000 0x0 0x100000>,
+ <0x0 0x60200000 0x0 0x1fe00000>,
<0x0 0x01c13000 0x0 0x1000>,
<0x0 0x60005000 0x0 0x2000>;
reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
diff --git a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
index 975550139e10..66513fc8e67a 100644
--- a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
+++ b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
@@ -773,6 +773,10 @@
status = "okay";
};
+&usb_1_ss0_dwc3 {
+ dr_mode = "host";
+};
+
&usb_1_ss0_dwc3_hs {
remote-endpoint = <&pmic_glink_ss0_hs_in>;
};
@@ -801,6 +805,10 @@
status = "okay";
};
+&usb_1_ss1_dwc3 {
+ dr_mode = "host";
+};
+
&usb_1_ss1_dwc3_hs {
remote-endpoint = <&pmic_glink_ss1_hs_in>;
};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
index 39f9d9cdc10d..d51a9bdcf67f 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
@@ -1197,6 +1197,10 @@
status = "okay";
};
+&usb_1_ss0_dwc3 {
+ dr_mode = "host";
+};
+
&usb_1_ss0_dwc3_hs {
remote-endpoint = <&pmic_glink_ss0_hs_in>;
};
@@ -1225,6 +1229,10 @@
status = "okay";
};
+&usb_1_ss1_dwc3 {
+ dr_mode = "host";
+};
+
&usb_1_ss1_dwc3_hs {
remote-endpoint = <&pmic_glink_ss1_hs_in>;
};
@@ -1253,6 +1261,10 @@
status = "okay";
};
+&usb_1_ss2_dwc3 {
+ dr_mode = "host";
+};
+
&usb_1_ss2_dwc3_hs {
remote-endpoint = <&pmic_glink_ss2_hs_in>;
};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
index 88805629ed2b..7e4f46ad8edd 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
@@ -2924,7 +2924,7 @@
#address-cells = <3>;
#size-cells = <2>;
ranges = <0x01000000 0x0 0x00000000 0x0 0x70200000 0x0 0x100000>,
- <0x02000000 0x0 0x70300000 0x0 0x70300000 0x0 0x1d00000>;
+ <0x02000000 0x0 0x70300000 0x0 0x70300000 0x0 0x3d00000>;
bus-range = <0x00 0xff>;
dma-coherent;
@@ -4066,8 +4066,6 @@
dma-coherent;
- usb-role-switch;
-
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -4321,8 +4319,6 @@
dma-coherent;
- usb-role-switch;
-
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -4421,8 +4417,6 @@
dma-coherent;
- usb-role-switch;
-
ports {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 0597de415fe0..7d992c3c01ce 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -333,6 +333,7 @@
power-domain@RK3328_PD_HEVC {
reg = <RK3328_PD_HEVC>;
+ clocks = <&cru SCLK_VENC_CORE>;
#power-domain-cells = <0>;
};
power-domain@RK3328_PD_VIDEO {
diff --git a/arch/arm64/boot/dts/rockchip/rk3568.dtsi b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
index ecaefe208e3e..695cccbdab0f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
@@ -350,6 +350,7 @@
assigned-clocks = <&pmucru CLK_PCIEPHY0_REF>;
assigned-clock-rates = <100000000>;
resets = <&cru SRST_PIPEPHY0>;
+ reset-names = "phy";
rockchip,pipe-grf = <&pipegrf>;
rockchip,pipe-phy-grf = <&pipe_phy_grf0>;
#phy-cells = <1>;
diff --git a/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi b/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi
index 62be06f3b863..e55390629114 100644
--- a/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi
@@ -1681,6 +1681,7 @@
assigned-clocks = <&pmucru CLK_PCIEPHY1_REF>;
assigned-clock-rates = <100000000>;
resets = <&cru SRST_PIPEPHY1>;
+ reset-names = "phy";
rockchip,pipe-grf = <&pipegrf>;
rockchip,pipe-phy-grf = <&pipe_phy_grf1>;
#phy-cells = <1>;
@@ -1697,6 +1698,7 @@
assigned-clocks = <&pmucru CLK_PCIEPHY2_REF>;
assigned-clock-rates = <100000000>;
resets = <&cru SRST_PIPEPHY2>;
+ reset-names = "phy";
rockchip,pipe-grf = <&pipegrf>;
rockchip,pipe-phy-grf = <&pipe_phy_grf2>;
#phy-cells = <1>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
index c44d001da169..d597112f1d5b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
@@ -72,7 +72,7 @@
rfkill {
compatible = "rfkill-gpio";
- label = "rfkill-pcie-wlan";
+ label = "rfkill-m2-wlan";
radio-type = "wlan";
shutdown-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi
index 76a6e8e517e9..c9749cb50076 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi
@@ -434,6 +434,7 @@
&sdmmc {
bus-width = <4>;
cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
disable-wp;
max-frequency = <150000000>;
no-mmc;
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index caba3e4bd09e..e75374d682f4 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -783,9 +783,6 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
return -EBUSY;
- if (__hyp_ack_skip_pgtable_check(tx))
- return 0;
-
return __hyp_check_page_state_range(addr, size,
PKVM_PAGE_SHARED_BORROWED);
}
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 456102bc0b55..6c5950b9ceac 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -24,6 +24,7 @@ static DEFINE_MUTEX(arm_pmus_lock);
static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
+static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
{
@@ -327,48 +328,25 @@ u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}
-/**
- * kvm_pmu_enable_counter_mask - enable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENSET register
- *
- * Call perf_event_enable to start counting the perf event
- */
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
{
- int i;
- if (!kvm_vcpu_has_pmu(vcpu))
- return;
-
- if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
+ if (!pmc->perf_event) {
+ kvm_pmu_create_perf_event(pmc);
return;
+ }
- for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
- struct kvm_pmc *pmc;
-
- if (!(val & BIT(i)))
- continue;
-
- pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
+ perf_event_enable(pmc->perf_event);
+ if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+ kvm_debug("fail to enable perf event\n");
+}
- if (!pmc->perf_event) {
- kvm_pmu_create_perf_event(pmc);
- } else {
- perf_event_enable(pmc->perf_event);
- if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
- kvm_debug("fail to enable perf event\n");
- }
- }
+static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
+{
+ if (pmc->perf_event)
+ perf_event_disable(pmc->perf_event);
}
-/**
- * kvm_pmu_disable_counter_mask - disable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENCLR register
- *
- * Call perf_event_disable to stop counting the perf event
- */
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
int i;
@@ -376,16 +354,18 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
return;
for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
- struct kvm_pmc *pmc;
+ struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
if (!(val & BIT(i)))
continue;
- pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
-
- if (pmc->perf_event)
- perf_event_disable(pmc->perf_event);
+ if (kvm_pmu_counter_is_enabled(pmc))
+ kvm_pmc_enable_perf_event(pmc);
+ else
+ kvm_pmc_disable_perf_event(pmc);
}
+
+ kvm_vcpu_pmu_restore_guest(vcpu);
}
/*
@@ -626,27 +606,28 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
val &= ~ARMV8_PMU_PMCR_LP;
+ /* Request a reload of the PMU to enable/disable affected counters */
+ if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
/* The reset bits don't indicate any state, and shouldn't be saved. */
__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
- if (val & ARMV8_PMU_PMCR_E) {
- kvm_pmu_enable_counter_mask(vcpu,
- __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
- } else {
- kvm_pmu_disable_counter_mask(vcpu,
- __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
- }
-
if (val & ARMV8_PMU_PMCR_C)
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
if (val & ARMV8_PMU_PMCR_P) {
- unsigned long mask = kvm_pmu_accessible_counter_mask(vcpu);
- mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
+ /*
+ * Unlike other PMU sysregs, the controls in PMCR_EL0 always apply
+ * to the 'guest' range of counters and never the 'hyp' range.
+ */
+ unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu) &
+ ~kvm_pmu_hyp_counter_mask(vcpu) &
+ ~BIT(ARMV8_PMU_CYCLE_IDX);
+
for_each_set_bit(i, &mask, 32)
kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
}
- kvm_vcpu_pmu_restore_guest(vcpu);
}
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
@@ -910,11 +891,11 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
{
u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
- kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
-
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+
+ kvm_pmu_reprogram_counter_mask(vcpu, mask);
}
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e2a5c2918d9e..634ff18a59a1 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1208,16 +1208,14 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
mask = kvm_pmu_accessible_counter_mask(vcpu);
if (p->is_write) {
val = p->regval & mask;
- if (r->Op2 & 0x1) {
+ if (r->Op2 & 0x1)
/* accessing PMCNTENSET_EL0 */
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
- kvm_pmu_enable_counter_mask(vcpu, val);
- kvm_vcpu_pmu_restore_guest(vcpu);
- } else {
+ else
/* accessing PMCNTENCLR_EL0 */
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
- kvm_pmu_disable_counter_mask(vcpu, val);
- }
+
+ kvm_pmu_reprogram_counter_mask(vcpu, val);
} else {
p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
}
@@ -2450,6 +2448,26 @@ static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
return __el2_visibility(vcpu, rd, s1pie_visibility);
}
+static bool access_mdcr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u64 old = __vcpu_sys_reg(vcpu, MDCR_EL2);
+
+ if (!access_rw(vcpu, p, r))
+ return false;
+
+ /*
+ * Request a reload of the PMU to enable/disable the counters affected
+ * by HPME.
+ */
+ if ((old ^ __vcpu_sys_reg(vcpu, MDCR_EL2)) & MDCR_EL2_HPME)
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
+ return true;
+}
+
+
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -2983,7 +3001,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
- EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
+ EL2_REG(MDCR_EL2, access_mdcr, reset_val, 0),
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index 6d0d329cbb35..f9acf866c709 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -34,6 +34,8 @@ enum vcpu_ftr {
#define E500_TLB_BITMAP (1 << 30)
/* TLB1 entry is mapped by host TLB0 */
#define E500_TLB_TLB0 (1 << 29)
+/* entry is writable on the host */
+#define E500_TLB_WRITABLE (1 << 28)
/* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */
#define E500_TLB_MAS2_ATTR (0x7f)
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index e5a145b578a4..06caf8bbbe2b 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -45,11 +45,14 @@ static inline unsigned int tlb1_max_shadow_size(void)
return host_tlb_params[1].entries - tlbcam_index - 1;
}
-static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
+static inline u32 e500_shadow_mas3_attrib(u32 mas3, bool writable, int usermode)
{
/* Mask off reserved bits. */
mas3 &= MAS3_ATTRIB_MASK;
+ if (!writable)
+ mas3 &= ~(MAS3_UW|MAS3_SW);
+
#ifndef CONFIG_KVM_BOOKE_HV
if (!usermode) {
/* Guest is in supervisor mode,
@@ -242,17 +245,18 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}
-static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
+static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
struct kvm_book3e_206_tlb_entry *gtlbe,
- kvm_pfn_t pfn, unsigned int wimg)
+ kvm_pfn_t pfn, unsigned int wimg,
+ bool writable)
{
ref->pfn = pfn;
ref->flags = E500_TLB_VALID;
+ if (writable)
+ ref->flags |= E500_TLB_WRITABLE;
/* Use guest supplied MAS2_G and MAS2_E */
ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
-
- return tlbe_is_writable(gtlbe);
}
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
@@ -305,6 +309,7 @@ static void kvmppc_e500_setup_stlbe(
{
kvm_pfn_t pfn = ref->pfn;
u32 pr = vcpu->arch.shared->msr & MSR_PR;
+ bool writable = !!(ref->flags & E500_TLB_WRITABLE);
BUG_ON(!(ref->flags & E500_TLB_VALID));
@@ -312,7 +317,7 @@ static void kvmppc_e500_setup_stlbe(
stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
- e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
+ e500_shadow_mas3_attrib(gtlbe->mas7_3, writable, pr);
}
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -321,15 +326,14 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
struct tlbe_ref *ref)
{
struct kvm_memory_slot *slot;
- unsigned long pfn = 0; /* silence GCC warning */
+ unsigned int psize;
+ unsigned long pfn;
struct page *page = NULL;
unsigned long hva;
- int pfnmap = 0;
int tsize = BOOK3E_PAGESZ_4K;
int ret = 0;
unsigned long mmu_seq;
struct kvm *kvm = vcpu_e500->vcpu.kvm;
- unsigned long tsize_pages = 0;
pte_t *ptep;
unsigned int wimg = 0;
pgd_t *pgdir;
@@ -351,110 +355,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
hva = gfn_to_hva_memslot(slot, gfn);
- if (tlbsel == 1) {
- struct vm_area_struct *vma;
- mmap_read_lock(kvm->mm);
-
- vma = find_vma(kvm->mm, hva);
- if (vma && hva >= vma->vm_start &&
- (vma->vm_flags & VM_PFNMAP)) {
- /*
- * This VMA is a physically contiguous region (e.g.
- * /dev/mem) that bypasses normal Linux page
- * management. Find the overlap between the
- * vma and the memslot.
- */
-
- unsigned long start, end;
- unsigned long slot_start, slot_end;
-
- pfnmap = 1;
-
- start = vma->vm_pgoff;
- end = start +
- vma_pages(vma);
-
- pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
-
- slot_start = pfn - (gfn - slot->base_gfn);
- slot_end = slot_start + slot->npages;
-
- if (start < slot_start)
- start = slot_start;
- if (end > slot_end)
- end = slot_end;
-
- tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
- MAS1_TSIZE_SHIFT;
-
- /*
- * e500 doesn't implement the lowest tsize bit,
- * or 1K pages.
- */
- tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
-
- /*
- * Now find the largest tsize (up to what the guest
- * requested) that will cover gfn, stay within the
- * range, and for which gfn and pfn are mutually
- * aligned.
- */
-
- for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
- unsigned long gfn_start, gfn_end;
- tsize_pages = 1UL << (tsize - 2);
-
- gfn_start = gfn & ~(tsize_pages - 1);
- gfn_end = gfn_start + tsize_pages;
-
- if (gfn_start + pfn - gfn < start)
- continue;
- if (gfn_end + pfn - gfn > end)
- continue;
- if ((gfn & (tsize_pages - 1)) !=
- (pfn & (tsize_pages - 1)))
- continue;
-
- gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
- pfn &= ~(tsize_pages - 1);
- break;
- }
- } else if (vma && hva >= vma->vm_start &&
- is_vm_hugetlb_page(vma)) {
- unsigned long psize = vma_kernel_pagesize(vma);
-
- tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
- MAS1_TSIZE_SHIFT;
-
- /*
- * Take the largest page size that satisfies both host
- * and guest mapping
- */
- tsize = min(__ilog2(psize) - 10, tsize);
-
- /*
- * e500 doesn't implement the lowest tsize bit,
- * or 1K pages.
- */
- tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
- }
-
- mmap_read_unlock(kvm->mm);
- }
-
- if (likely(!pfnmap)) {
- tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
- pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page);
- if (is_error_noslot_pfn(pfn)) {
- if (printk_ratelimit())
- pr_err("%s: real page not found for gfn %lx\n",
- __func__, (long)gfn);
- return -EINVAL;
- }
-
- /* Align guest and physical address to page map boundaries */
- pfn &= ~(tsize_pages - 1);
- gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
+ pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable, &page);
+ if (is_error_noslot_pfn(pfn)) {
+ if (printk_ratelimit())
+ pr_err("%s: real page not found for gfn %lx\n",
+ __func__, (long)gfn);
+ return -EINVAL;
}
spin_lock(&kvm->mmu_lock);
@@ -472,14 +378,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
* can't run hence pfn won't change.
*/
local_irq_save(flags);
- ptep = find_linux_pte(pgdir, hva, NULL, NULL);
+ ptep = find_linux_pte(pgdir, hva, NULL, &psize);
if (ptep) {
pte_t pte = READ_ONCE(*ptep);
if (pte_present(pte)) {
wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
MAS2_WIMGE_MASK;
- local_irq_restore(flags);
} else {
local_irq_restore(flags);
pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
@@ -488,10 +393,72 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
goto out;
}
}
- writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
+ local_irq_restore(flags);
+
+ if (psize && tlbsel == 1) {
+ unsigned long psize_pages, tsize_pages;
+ unsigned long start, end;
+ unsigned long slot_start, slot_end;
+
+ psize_pages = 1UL << (psize - PAGE_SHIFT);
+ start = pfn & ~(psize_pages - 1);
+ end = start + psize_pages;
+
+ slot_start = pfn - (gfn - slot->base_gfn);
+ slot_end = slot_start + slot->npages;
+
+ if (start < slot_start)
+ start = slot_start;
+ if (end > slot_end)
+ end = slot_end;
+
+ tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
+ MAS1_TSIZE_SHIFT;
+
+ /*
+ * Any page size that doesn't satisfy the host mapping
+ * will fail the start and end tests.
+ */
+ tsize = min(psize - PAGE_SHIFT + BOOK3E_PAGESZ_4K, tsize);
+
+ /*
+ * e500 doesn't implement the lowest tsize bit,
+ * or 1K pages.
+ */
+ tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
+
+ /*
+ * Now find the largest tsize (up to what the guest
+ * requested) that will cover gfn, stay within the
+ * range, and for which gfn and pfn are mutually
+ * aligned.
+ */
+
+ for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
+ unsigned long gfn_start, gfn_end;
+ tsize_pages = 1UL << (tsize - 2);
+
+ gfn_start = gfn & ~(tsize_pages - 1);
+ gfn_end = gfn_start + tsize_pages;
+
+ if (gfn_start + pfn - gfn < start)
+ continue;
+ if (gfn_end + pfn - gfn > end)
+ continue;
+ if ((gfn & (tsize_pages - 1)) !=
+ (pfn & (tsize_pages - 1)))
+ continue;
+
+ gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
+ pfn &= ~(tsize_pages - 1);
+ break;
+ }
+ }
+ kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg, writable);
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
ref, gvaddr, stlbe);
+ writable = tlbe_is_writable(stlbe);
/* Clear i-cache for new pages */
kvmppc_mmu_flush_icache(pfn);
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 71aabc5c6713..125f5ecd9565 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -122,6 +122,7 @@ struct kernel_mapping {
extern struct kernel_mapping kernel_map;
extern phys_addr_t phys_ram_base;
+extern unsigned long vmemmap_start_pfn;
#define is_kernel_mapping(x) \
((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index d4e99eef90ac..050fdc49b5ad 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -87,7 +87,7 @@
* Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
* is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
*/
-#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
+#define vmemmap ((struct page *)VMEMMAP_START - vmemmap_start_pfn)
#define PCI_IO_SIZE SZ_16M
#define PCI_IO_END VMEMMAP_START
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 6c82318065cf..3d250824178b 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -159,6 +159,7 @@ struct riscv_pmu_snapshot_data {
};
#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
+#define RISCV_PMU_PLAT_FW_EVENT_MASK GENMASK_ULL(61, 0)
#define RISCV_PMU_RAW_EVENT_IDX 0x20000
#define RISCV_PLAT_FW_EVENT 0xFFFF
diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
index e5121b89acea..52f11bfd0079 100644
--- a/arch/riscv/include/asm/spinlock.h
+++ b/arch/riscv/include/asm/spinlock.h
@@ -3,8 +3,11 @@
#ifndef __ASM_RISCV_SPINLOCK_H
#define __ASM_RISCV_SPINLOCK_H
-#ifdef CONFIG_RISCV_COMBO_SPINLOCKS
+#ifdef CONFIG_QUEUED_SPINLOCKS
#define _Q_PENDING_LOOPS (1 << 9)
+#endif
+
+#ifdef CONFIG_RISCV_COMBO_SPINLOCKS
#define __no_arch_spinlock_redefine
#include <asm/ticket_spinlock.h>
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index c200d329d4bd..33a5a9f2a0d4 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -23,21 +23,21 @@
REG_S a0, TASK_TI_A0(tp)
csrr a0, CSR_CAUSE
/* Exclude IRQs */
- blt a0, zero, _new_vmalloc_restore_context_a0
+ blt a0, zero, .Lnew_vmalloc_restore_context_a0
REG_S a1, TASK_TI_A1(tp)
/* Only check new_vmalloc if we are in page/protection fault */
li a1, EXC_LOAD_PAGE_FAULT
- beq a0, a1, _new_vmalloc_kernel_address
+ beq a0, a1, .Lnew_vmalloc_kernel_address
li a1, EXC_STORE_PAGE_FAULT
- beq a0, a1, _new_vmalloc_kernel_address
+ beq a0, a1, .Lnew_vmalloc_kernel_address
li a1, EXC_INST_PAGE_FAULT
- bne a0, a1, _new_vmalloc_restore_context_a1
+ bne a0, a1, .Lnew_vmalloc_restore_context_a1
-_new_vmalloc_kernel_address:
+.Lnew_vmalloc_kernel_address:
/* Is it a kernel address? */
csrr a0, CSR_TVAL
- bge a0, zero, _new_vmalloc_restore_context_a1
+ bge a0, zero, .Lnew_vmalloc_restore_context_a1
/* Check if a new vmalloc mapping appeared that could explain the trap */
REG_S a2, TASK_TI_A2(tp)
@@ -69,7 +69,7 @@ _new_vmalloc_kernel_address:
/* Check the value of new_vmalloc for this cpu */
REG_L a2, 0(a0)
and a2, a2, a1
- beq a2, zero, _new_vmalloc_restore_context
+ beq a2, zero, .Lnew_vmalloc_restore_context
/* Atomically reset the current cpu bit in new_vmalloc */
amoxor.d a0, a1, (a0)
@@ -83,11 +83,11 @@ _new_vmalloc_kernel_address:
csrw CSR_SCRATCH, x0
sret
-_new_vmalloc_restore_context:
+.Lnew_vmalloc_restore_context:
REG_L a2, TASK_TI_A2(tp)
-_new_vmalloc_restore_context_a1:
+.Lnew_vmalloc_restore_context_a1:
REG_L a1, TASK_TI_A1(tp)
-_new_vmalloc_restore_context_a0:
+.Lnew_vmalloc_restore_context_a0:
REG_L a0, TASK_TI_A0(tp)
.endm
@@ -278,6 +278,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
#else
sret
#endif
+SYM_INNER_LABEL(ret_from_exception_end, SYM_L_GLOBAL)
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 1cd461f3d872..47d0ebeec93c 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -23,7 +23,7 @@ struct used_bucket {
struct relocation_head {
struct hlist_node node;
- struct list_head *rel_entry;
+ struct list_head rel_entry;
void *location;
};
@@ -634,7 +634,7 @@ process_accumulated_relocations(struct module *me,
location = rel_head_iter->location;
list_for_each_entry_safe(rel_entry_iter,
rel_entry_iter_tmp,
- rel_head_iter->rel_entry,
+ &rel_head_iter->rel_entry,
head) {
curr_type = rel_entry_iter->type;
reloc_handlers[curr_type].reloc_handler(
@@ -704,16 +704,7 @@ static int add_relocation_to_accumulate(struct module *me, int type,
return -ENOMEM;
}
- rel_head->rel_entry =
- kmalloc(sizeof(struct list_head), GFP_KERNEL);
-
- if (!rel_head->rel_entry) {
- kfree(entry);
- kfree(rel_head);
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(rel_head->rel_entry);
+ INIT_LIST_HEAD(&rel_head->rel_entry);
rel_head->location = location;
INIT_HLIST_NODE(&rel_head->node);
if (!current_head->first) {
@@ -722,7 +713,6 @@ static int add_relocation_to_accumulate(struct module *me, int type,
if (!bucket) {
kfree(entry);
- kfree(rel_head->rel_entry);
kfree(rel_head);
return -ENOMEM;
}
@@ -735,7 +725,7 @@ static int add_relocation_to_accumulate(struct module *me, int type,
}
/* Add relocation to head of discovered rel_head */
- list_add_tail(&entry->head, rel_head->rel_entry);
+ list_add_tail(&entry->head, &rel_head->rel_entry);
return 0;
}
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index 380a0e8cecc0..c0738d6c6498 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -30,7 +30,7 @@ static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
p->ainsn.api.restore = (unsigned long)p->addr + len;
patch_text_nosync(p->ainsn.api.insn, &p->opcode, len);
- patch_text_nosync(p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
+ patch_text_nosync((void *)p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 153a2db4c5fa..d4355c770c36 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -17,6 +17,7 @@
#ifdef CONFIG_FRAME_POINTER
extern asmlinkage void handle_exception(void);
+extern unsigned long ret_from_exception_end;
static inline int fp_is_valid(unsigned long fp, unsigned long sp)
{
@@ -71,7 +72,8 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = frame->fp;
pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
&frame->ra);
- if (pc == (unsigned long)handle_exception) {
+ if (pc >= (unsigned long)handle_exception &&
+ pc < (unsigned long)&ret_from_exception_end) {
if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
break;
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 51ebfd23e007..8ff8e8b36524 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -35,7 +35,7 @@
int show_unhandled_signals = 1;
-static DEFINE_SPINLOCK(die_lock);
+static DEFINE_RAW_SPINLOCK(die_lock);
static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
{
@@ -81,7 +81,7 @@ void die(struct pt_regs *regs, const char *str)
oops_enter();
- spin_lock_irqsave(&die_lock, flags);
+ raw_spin_lock_irqsave(&die_lock, flags);
console_verbose();
bust_spinlocks(1);
@@ -100,7 +100,7 @@ void die(struct pt_regs *regs, const char *str)
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- spin_unlock_irqrestore(&die_lock, flags);
+ raw_spin_unlock_irqrestore(&die_lock, flags);
oops_exit();
if (in_interrupt())
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index fc53ce748c80..8d167e09f1fe 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -33,6 +33,7 @@
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/soc.h>
+#include <asm/sparsemem.h>
#include <asm/tlbflush.h>
#include "../kernel/head.h"
@@ -62,6 +63,13 @@ EXPORT_SYMBOL(pgtable_l5_enabled);
phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#define VMEMMAP_ADDR_ALIGN (1ULL << SECTION_SIZE_BITS)
+
+unsigned long vmemmap_start_pfn __ro_after_init;
+EXPORT_SYMBOL(vmemmap_start_pfn);
+#endif
+
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
@@ -240,8 +248,12 @@ static void __init setup_bootmem(void)
* Make sure we align the start of the memory on a PMD boundary so that
* at worst, we map the linear mapping with PMD mappings.
*/
- if (!IS_ENABLED(CONFIG_XIP_KERNEL))
+ if (!IS_ENABLED(CONFIG_XIP_KERNEL)) {
phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT;
+#endif
+ }
/*
* In 64-bit, any use of __va/__pa before this point is wrong as we
@@ -1101,6 +1113,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
phys_ram_base = CONFIG_PHYS_RAM_BASE;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT;
+#endif
kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index ea8dce299954..d4f031e086fc 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2678,9 +2678,13 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
kvm_s390_clear_float_irqs(dev->kvm);
break;
case KVM_DEV_FLIC_APF_ENABLE:
+ if (kvm_is_ucontrol(dev->kvm))
+ return -EINVAL;
dev->kvm->arch.gmap->pfault_enabled = 1;
break;
case KVM_DEV_FLIC_APF_DISABLE_WAIT:
+ if (kvm_is_ucontrol(dev->kvm))
+ return -EINVAL;
dev->kvm->arch.gmap->pfault_enabled = 0;
/*
* Make sure no async faults are in transition when
@@ -2894,6 +2898,8 @@ int kvm_set_routing_entry(struct kvm *kvm,
switch (ue->type) {
/* we store the userspace addresses instead of the guest addresses */
case KVM_IRQ_ROUTING_S390_ADAPTER:
+ if (kvm_is_ucontrol(kvm))
+ return -EINVAL;
e->set = set_adapter_int;
uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
if (uaddr == -EFAULT)
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 150b9387860a..a687695d8f68 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -854,7 +854,7 @@ unpin:
static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
gpa_t gpa)
{
- hpa_t hpa = (hpa_t) vsie_page->scb_o;
+ hpa_t hpa = virt_to_phys(vsie_page->scb_o);
if (hpa)
unpin_guest_page(vcpu->kvm, gpa, hpa);
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index 6bc1eb2a21bd..887b0b8e21e3 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -190,7 +190,8 @@ int ssp_get(struct task_struct *target, const struct user_regset *regset,
struct fpu *fpu = &target->thread.fpu;
struct cet_user_state *cetregs;
- if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) ||
+ !ssp_active(target, regset))
return -ENODEV;
sync_fpstate(fpu);
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index 9eed0c144dad..9e51242ed125 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -175,7 +175,6 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform);
noinstr void __static_call_update_early(void *tramp, void *func)
{
BUG_ON(system_state != SYSTEM_BOOTING);
- BUG_ON(!early_boot_irqs_disabled);
BUG_ON(static_call_initialized);
__text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE);
sync_core();
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 95dd7b795935..cad16c163611 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -6844,16 +6844,24 @@ static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq)
if (new_bfqq == waker_bfqq) {
/*
* If waker_bfqq is in the merge chain, and current
- * is the only procress.
+ * is the only process, waker_bfqq can be freed.
*/
if (bfqq_process_refs(waker_bfqq) == 1)
return NULL;
- break;
+
+ return waker_bfqq;
}
new_bfqq = new_bfqq->new_bfqq;
}
+ /*
+ * If waker_bfqq is not in the merge chain, and its process reference
+ * is 0, waker_bfqq can be freed.
+ */
+ if (bfqq_process_refs(waker_bfqq) == 0)
+ return NULL;
+
return waker_bfqq;
}
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 8274a17872ed..a972831dbd66 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -610,16 +610,28 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
return 0;
}
+/**
+ * acpi_video_device_EDID() - Get EDID from ACPI _DDC
+ * @device: video output device (LCD, CRT, ..)
+ * @edid: address for returned EDID pointer
+ * @length: _DDC length to request (must be a multiple of 128)
+ *
+ * Get EDID from ACPI _DDC. On success, a pointer to the EDID data is written
+ * to the @edid address, and the length of the EDID is returned. The caller is
+ * responsible for freeing the edid pointer.
+ *
+ * Return: the length of the EDID (a positive value) on success, or a negative
+ * error code on failure.
+ */
static int
-acpi_video_device_EDID(struct acpi_video_device *device,
- union acpi_object **edid, int length)
+acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length)
{
- int status;
+ acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
-
+ int ret;
*edid = NULL;
@@ -636,16 +648,17 @@ acpi_video_device_EDID(struct acpi_video_device *device,
obj = buffer.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER)
- *edid = obj;
- else {
+ if (obj && obj->type == ACPI_TYPE_BUFFER) {
+ *edid = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL);
+ ret = *edid ? obj->buffer.length : -ENOMEM;
+ } else {
acpi_handle_debug(device->dev->handle,
"Invalid _DDC data for length %d\n", length);
- status = -EFAULT;
- kfree(obj);
+ ret = -EFAULT;
}
- return status;
+ kfree(obj);
+ return ret;
}
/* bus */
@@ -1435,9 +1448,7 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
{
struct acpi_video_bus *video;
struct acpi_video_device *video_device;
- union acpi_object *buffer = NULL;
- acpi_status status;
- int i, length;
+ int i, length, ret;
if (!device || !acpi_driver_data(device))
return -EINVAL;
@@ -1477,16 +1488,10 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
}
for (length = 512; length > 0; length -= 128) {
- status = acpi_video_device_EDID(video_device, &buffer,
- length);
- if (ACPI_SUCCESS(status))
- break;
+ ret = acpi_video_device_EDID(video_device, edid, length);
+ if (ret > 0)
+ return ret;
}
- if (!length)
- continue;
-
- *edid = buffer->buffer.pointer;
- return length;
}
return -ENODEV;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 821867de43be..90aaec923889 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -441,6 +441,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook X1504VAP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "X1504VAP"),
+ },
+ },
+ {
/* Asus Vivobook X1704VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -646,6 +653,17 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
},
},
+ {
+ /*
+ * TongFang GM5HG0A: in case of the SKIKK Vanaheim relabel the
+ * board-name is changed, so check OEM strings instead. Note
+ * OEM string matches are always exact matches.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=219614
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_OEM_STRING, "GM5HG0A"),
+ },
+ },
{ }
};
@@ -671,11 +689,11 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
for (i = 0; i < ARRAY_SIZE(override_table); i++) {
const struct irq_override_cmp *entry = &override_table[i];
- if (dmi_check_system(entry->system) &&
- entry->irq == gsi &&
+ if (entry->irq == gsi &&
entry->triggering == triggering &&
entry->polarity == polarity &&
- entry->shareable == shareable)
+ entry->shareable == shareable &&
+ dmi_check_system(entry->system))
return entry->override;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c085dd81ebe7..63ec2f218431 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4143,6 +4143,10 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
{ "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },
+ { "Samsung SSD 870 QVO*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NOLPM },
{ "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index cf160dd2c27b..b962da263eee 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -27,9 +27,17 @@ static ssize_t name##_read(struct file *file, struct kobject *kobj, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
+ cpumask_var_t mask; \
+ ssize_t n; \
\
- return cpumap_print_bitmask_to_buf(buf, topology_##mask(dev->id), \
- off, count); \
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
+ return -ENOMEM; \
+ \
+ cpumask_copy(mask, topology_##mask(dev->id)); \
+ n = cpumap_print_bitmask_to_buf(buf, mask, off, count); \
+ free_cpumask_var(mask); \
+ \
+ return n; \
} \
\
static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
@@ -37,9 +45,17 @@ static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
+ cpumask_var_t mask; \
+ ssize_t n; \
+ \
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
+ return -ENOMEM; \
+ \
+ cpumask_copy(mask, topology_##mask(dev->id)); \
+ n = cpumap_print_list_to_buf(buf, mask, off, count); \
+ free_cpumask_var(mask); \
\
- return cpumap_print_list_to_buf(buf, topology_##mask(dev->id), \
- off, count); \
+ return n; \
}
define_id_show_func(physical_package_id, "%d");
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 7fd9d5ddce02..224eafc27dbe 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -1472,10 +1472,15 @@ EXPORT_SYMBOL_GPL(btmtk_usb_setup);
int btmtk_usb_shutdown(struct hci_dev *hdev)
{
+ struct btmtk_data *data = hci_get_priv(hdev);
struct btmtk_hci_wmt_params wmt_params;
u8 param = 0;
int err;
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ return err;
+
/* Disable the device */
wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 0;
@@ -1486,9 +1491,11 @@ int btmtk_usb_shutdown(struct hci_dev *hdev)
err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ usb_autopm_put_interface(data->intf);
return err;
}
+ usb_autopm_put_interface(data->intf);
return 0;
}
EXPORT_SYMBOL_GPL(btmtk_usb_shutdown);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 569f5b7d6e46..1230045d78a5 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1381,6 +1381,7 @@ static void btnxpuart_tx_work(struct work_struct *work)
while ((skb = nxp_dequeue(nxpdev))) {
len = serdev_device_write_buf(serdev, skb->data, skb->len);
+ serdev_device_wait_until_sent(serdev, 0);
hdev->stat.byte_tx += len;
skb_pull(skb, len);
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 07645ce2119a..56ba4192c89c 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -917,7 +917,7 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
return err;
}
- mhi_cntrl->regs = pcim_iomap_region(pdev, 1 << bar_num, pci_name(pdev));
+ mhi_cntrl->regs = pcim_iomap_region(pdev, bar_num, pci_name(pdev));
if (IS_ERR(mhi_cntrl->regs)) {
err = PTR_ERR(mhi_cntrl->regs);
dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 92a83a9bb2e1..26e98fea991a 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -325,8 +325,6 @@ config QORIQ_CPUFREQ
This adds the CPUFreq driver support for Freescale QorIQ SoCs
which are capable of changing the CPU's frequency dynamically.
-endif
-
config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
depends on ACPI_PROCESSOR
@@ -355,4 +353,6 @@ config ACPI_CPPC_CPUFREQ_FIE
If in doubt, say N.
+endif
+
endmenu
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 14462c092039..0c92a628bbd4 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -504,12 +504,12 @@ static int sbi_cpuidle_probe(struct platform_device *pdev)
int cpu, ret;
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
- struct device_node *np, *pds_node;
+ struct device_node *pds_node;
/* Detect OSI support based on CPU DT nodes */
sbi_cpuidle_use_osi = true;
for_each_possible_cpu(cpu) {
- np = of_cpu_device_node_get(cpu);
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
if (np &&
of_property_present(np, "power-domains") &&
of_property_present(np, "power-domain-names")) {
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index f2992f92d8db..173ddcac540a 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -10,25 +10,27 @@
* DOC: teo-description
*
* The idea of this governor is based on the observation that on many systems
- * timer events are two or more orders of magnitude more frequent than any
- * other interrupts, so they are likely to be the most significant cause of CPU
- * wakeups from idle states. Moreover, information about what happened in the
- * (relatively recent) past can be used to estimate whether or not the deepest
- * idle state with target residency within the (known) time till the closest
- * timer event, referred to as the sleep length, is likely to be suitable for
- * the upcoming CPU idle period and, if not, then which of the shallower idle
- * states to choose instead of it.
+ * timer interrupts are two or more orders of magnitude more frequent than any
+ * other interrupt types, so they are likely to dominate CPU wakeup patterns.
+ * Moreover, in principle, the time when the next timer event is going to occur
+ * can be determined at the idle state selection time, although doing that may
+ * be costly, so it can be regarded as the most reliable source of information
+ * for idle state selection.
*
- * Of course, non-timer wakeup sources are more important in some use cases
- * which can be covered by taking a few most recent idle time intervals of the
- * CPU into account. However, even in that context it is not necessary to
- * consider idle duration values greater than the sleep length, because the
- * closest timer will ultimately wake up the CPU anyway unless it is woken up
- * earlier.
+ * Of course, non-timer wakeup sources are more important in some use cases,
+ * but even then it is generally unnecessary to consider idle duration values
+ * greater than the time till the next timer event, referred to as the sleep
+ * length in what follows, because the closest timer will ultimately wake up the
+ * CPU anyway unless it is woken up earlier.
*
- * Thus this governor estimates whether or not the prospective idle duration of
- * a CPU is likely to be significantly shorter than the sleep length and selects
- * an idle state for it accordingly.
+ * However, since obtaining the sleep length may be costly, the governor first
+ * checks if it can select a shallow idle state using wakeup pattern information
+ * from recent times, in which case it can do without knowing the sleep length
+ * at all. For this purpose, it counts CPU wakeup events and looks for an idle
+ * state whose target residency has not exceeded the idle duration (measured
+ * after wakeup) in the majority of relevant recent cases. If the target
+ * residency of that state is small enough, it may be used right away and the
+ * sleep length need not be determined.
*
* The computations carried out by this governor are based on using bins whose
* boundaries are aligned with the target residency parameter values of the CPU
@@ -39,7 +41,11 @@
* idle state 2, the third bin spans from the target residency of idle state 2
* up to, but not including, the target residency of idle state 3 and so on.
* The last bin spans from the target residency of the deepest idle state
- * supplied by the driver to infinity.
+ * supplied by the driver to the scheduler tick period length or to infinity if
+ * the tick period length is less than the target residency of that state. In
+ * the latter case, the governor also counts events with the measured idle
+ * duration between the tick period length and the target residency of the
+ * deepest idle state.
*
* Two metrics called "hits" and "intercepts" are associated with each bin.
* They are updated every time before selecting an idle state for the given CPU
@@ -49,47 +55,46 @@
* sleep length and the idle duration measured after CPU wakeup fall into the
* same bin (that is, the CPU appears to wake up "on time" relative to the sleep
* length). In turn, the "intercepts" metric reflects the relative frequency of
- * situations in which the measured idle duration is so much shorter than the
- * sleep length that the bin it falls into corresponds to an idle state
- * shallower than the one whose bin is fallen into by the sleep length (these
- * situations are referred to as "intercepts" below).
+ * non-timer wakeup events for which the measured idle duration falls into a bin
+ * that corresponds to an idle state shallower than the one whose bin is fallen
+ * into by the sleep length (these events are also referred to as "intercepts"
+ * below).
*
* In order to select an idle state for a CPU, the governor takes the following
* steps (modulo the possible latency constraint that must be taken into account
* too):
*
- * 1. Find the deepest CPU idle state whose target residency does not exceed
- * the current sleep length (the candidate idle state) and compute 2 sums as
- * follows:
+ * 1. Find the deepest enabled CPU idle state (the candidate idle state) and
+ * compute 2 sums as follows:
*
- * - The sum of the "hits" and "intercepts" metrics for the candidate state
- * and all of the deeper idle states (it represents the cases in which the
- * CPU was idle long enough to avoid being intercepted if the sleep length
- * had been equal to the current one).
+ * - The sum of the "hits" metric for all of the idle states shallower than
+ * the candidate one (it represents the cases in which the CPU was likely
+ * woken up by a timer).
*
- * - The sum of the "intercepts" metrics for all of the idle states shallower
- * than the candidate one (it represents the cases in which the CPU was not
- * idle long enough to avoid being intercepted if the sleep length had been
- * equal to the current one).
+ * - The sum of the "intercepts" metric for all of the idle states shallower
+ * than the candidate one (it represents the cases in which the CPU was
+ * likely woken up by a non-timer wakeup source).
*
- * 2. If the second sum is greater than the first one the CPU is likely to wake
- * up early, so look for an alternative idle state to select.
+ * 2. If the second sum computed in step 1 is greater than a half of the sum of
+ * both metrics for the candidate state bin and all subsequent bins (if any),
+ * a shallower idle state is likely to be more suitable, so look for it.
*
- * - Traverse the idle states shallower than the candidate one in the
+ * - Traverse the enabled idle states shallower than the candidate one in the
* descending order.
*
* - For each of them compute the sum of the "intercepts" metrics over all
* of the idle states between it and the candidate one (including the
* former and excluding the latter).
*
- * - If each of these sums that needs to be taken into account (because the
- * check related to it has indicated that the CPU is likely to wake up
- * early) is greater than a half of the corresponding sum computed in step
- * 1 (which means that the target residency of the state in question had
- * not exceeded the idle duration in over a half of the relevant cases),
- * select the given idle state instead of the candidate one.
+ * - If this sum is greater than a half of the second sum computed in step 1,
+ * use the given idle state as the new candidate one.
*
- * 3. By default, select the candidate state.
+ * 3. If the current candidate state is state 0 or its target residency is short
+ * enough, return it and prevent the scheduler tick from being stopped.
+ *
+ * 4. Obtain the sleep length value and check if it is below the target
+ * residency of the current candidate state, in which case a new shallower
+ * candidate state needs to be found, so look for it.
*/
#include <linux/cpuidle.h>
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index 6749d4dd6d64..7f4d78fd800e 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -237,9 +237,9 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data1 = {
static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data2 = {
.label = "ls2k2000_gpio",
.mode = BIT_CTRL_MODE,
- .conf_offset = 0x84,
- .in_offset = 0x88,
- .out_offset = 0x80,
+ .conf_offset = 0x4,
+ .in_offset = 0x8,
+ .out_offset = 0x0,
};
static const struct loongson_gpio_chip_data loongson_gpio_ls3a5000_data = {
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index f387dad81f29..686ae3d11ba3 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -1027,6 +1027,30 @@ static void gpio_sim_device_deactivate(struct gpio_sim_device *dev)
dev->pdev = NULL;
}
+static void
+gpio_sim_device_lockup_configfs(struct gpio_sim_device *dev, bool lock)
+{
+ struct configfs_subsystem *subsys = dev->group.cg_subsys;
+ struct gpio_sim_bank *bank;
+ struct gpio_sim_line *line;
+
+ /*
+ * The device only needs to depend on leaf line entries. This is
+ * sufficient to lock up all the configfs entries that the
+ * instantiated, alive device depends on.
+ */
+ list_for_each_entry(bank, &dev->bank_list, siblings) {
+ list_for_each_entry(line, &bank->line_list, siblings) {
+ if (lock)
+ WARN_ON(configfs_depend_item_unlocked(
+ subsys, &line->group.cg_item));
+ else
+ configfs_undepend_item_unlocked(
+ &line->group.cg_item);
+ }
+ }
+}
+
static ssize_t
gpio_sim_device_config_live_store(struct config_item *item,
const char *page, size_t count)
@@ -1039,14 +1063,24 @@ gpio_sim_device_config_live_store(struct config_item *item,
if (ret)
return ret;
- guard(mutex)(&dev->lock);
+ if (live)
+ gpio_sim_device_lockup_configfs(dev, true);
- if (live == gpio_sim_device_is_live(dev))
- ret = -EPERM;
- else if (live)
- ret = gpio_sim_device_activate(dev);
- else
- gpio_sim_device_deactivate(dev);
+ scoped_guard(mutex, &dev->lock) {
+ if (live == gpio_sim_device_is_live(dev))
+ ret = -EPERM;
+ else if (live)
+ ret = gpio_sim_device_activate(dev);
+ else
+ gpio_sim_device_deactivate(dev);
+ }
+
+ /*
+ * Undepend is required only if device disablement (live == 0)
+ * succeeds or if device enablement (live == 1) fails.
+ */
+ if (live == !!ret)
+ gpio_sim_device_lockup_configfs(dev, false);
return ret ?: count;
}
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index 91b6352c957c..e89f299f2140 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -1410,7 +1410,7 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
size_t num_entries = gpio_virtuser_get_lookup_count(dev);
struct gpio_virtuser_lookup_entry *entry;
struct gpio_virtuser_lookup *lookup;
- unsigned int i = 0;
+ unsigned int i = 0, idx;
lockdep_assert_held(&dev->lock);
@@ -1424,12 +1424,12 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
return -ENOMEM;
list_for_each_entry(lookup, &dev->lookup_list, siblings) {
+ idx = 0;
list_for_each_entry(entry, &lookup->entry_list, siblings) {
- table->table[i] =
+ table->table[i++] =
GPIO_LOOKUP_IDX(entry->key,
entry->offset < 0 ? U16_MAX : entry->offset,
- lookup->con_id, i, entry->flags);
- i++;
+ lookup->con_id, idx++, entry->flags);
}
}
@@ -1439,6 +1439,15 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
return 0;
}
+static void
+gpio_virtuser_remove_lookup_table(struct gpio_virtuser_device *dev)
+{
+ gpiod_remove_lookup_table(dev->lookup_table);
+ kfree(dev->lookup_table->dev_id);
+ kfree(dev->lookup_table);
+ dev->lookup_table = NULL;
+}
+
static struct fwnode_handle *
gpio_virtuser_make_device_swnode(struct gpio_virtuser_device *dev)
{
@@ -1487,10 +1496,8 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
pdevinfo.fwnode = swnode;
ret = gpio_virtuser_make_lookup_table(dev);
- if (ret) {
- fwnode_remove_software_node(swnode);
- return ret;
- }
+ if (ret)
+ goto err_remove_swnode;
reinit_completion(&dev->probe_completion);
dev->driver_bound = false;
@@ -1498,23 +1505,31 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
- fwnode_remove_software_node(swnode);
- return PTR_ERR(pdev);
+ goto err_remove_lookup_table;
}
wait_for_completion(&dev->probe_completion);
bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
if (!dev->driver_bound) {
- platform_device_unregister(pdev);
- fwnode_remove_software_node(swnode);
- return -ENXIO;
+ ret = -ENXIO;
+ goto err_unregister_pdev;
}
dev->pdev = pdev;
return 0;
+
+err_unregister_pdev:
+ platform_device_unregister(pdev);
+err_remove_lookup_table:
+ gpio_virtuser_remove_lookup_table(dev);
+err_remove_swnode:
+ fwnode_remove_software_node(swnode);
+
+ return ret;
}
static void
@@ -1526,10 +1541,33 @@ gpio_virtuser_device_deactivate(struct gpio_virtuser_device *dev)
swnode = dev_fwnode(&dev->pdev->dev);
platform_device_unregister(dev->pdev);
+ gpio_virtuser_remove_lookup_table(dev);
fwnode_remove_software_node(swnode);
dev->pdev = NULL;
- gpiod_remove_lookup_table(dev->lookup_table);
- kfree(dev->lookup_table);
+}
+
+static void
+gpio_virtuser_device_lockup_configfs(struct gpio_virtuser_device *dev, bool lock)
+{
+ struct configfs_subsystem *subsys = dev->group.cg_subsys;
+ struct gpio_virtuser_lookup_entry *entry;
+ struct gpio_virtuser_lookup *lookup;
+
+ /*
+ * The device only needs to depend on leaf lookup entries. This is
+ * sufficient to lock up all the configfs entries that the
+ * instantiated, alive device depends on.
+ */
+ list_for_each_entry(lookup, &dev->lookup_list, siblings) {
+ list_for_each_entry(entry, &lookup->entry_list, siblings) {
+ if (lock)
+ WARN_ON(configfs_depend_item_unlocked(
+ subsys, &entry->group.cg_item));
+ else
+ configfs_undepend_item_unlocked(
+ &entry->group.cg_item);
+ }
+ }
}
static ssize_t
@@ -1544,15 +1582,24 @@ gpio_virtuser_device_config_live_store(struct config_item *item,
if (ret)
return ret;
- guard(mutex)(&dev->lock);
+ if (live)
+ gpio_virtuser_device_lockup_configfs(dev, true);
- if (live == gpio_virtuser_device_is_live(dev))
- return -EPERM;
+ scoped_guard(mutex, &dev->lock) {
+ if (live == gpio_virtuser_device_is_live(dev))
+ ret = -EPERM;
+ else if (live)
+ ret = gpio_virtuser_device_activate(dev);
+ else
+ gpio_virtuser_device_deactivate(dev);
+ }
- if (live)
- ret = gpio_virtuser_device_activate(dev);
- else
- gpio_virtuser_device_deactivate(dev);
+ /*
+ * Undepend is required only if device disablement (live == 0)
+ * succeeds or if device enablement (live == 1) fails.
+ */
+ if (live == !!ret)
+ gpio_virtuser_device_lockup_configfs(dev, false);
return ret ?: count;
}
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index c6a8f2c82680..792d94c49077 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -65,7 +65,7 @@ struct xgpio_instance {
DECLARE_BITMAP(state, 64);
DECLARE_BITMAP(last_irq_read, 64);
DECLARE_BITMAP(dir, 64);
- spinlock_t gpio_lock; /* For serializing operations */
+ raw_spinlock_t gpio_lock; /* For serializing operations */
int irq;
DECLARE_BITMAP(enable, 64);
DECLARE_BITMAP(rising_edge, 64);
@@ -179,14 +179,14 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signal and set its direction to output */
__assign_bit(bit, chip->state, val);
xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -210,7 +210,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64);
bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
bitmap_replace(state, chip->state, hw_bits, hw_mask, 64);
@@ -218,7 +218,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_copy(chip->state, state, 64);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -236,13 +236,13 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set the GPIO bit in shadow register and set direction as input */
__set_bit(bit, chip->dir);
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -265,7 +265,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write state of GPIO signal */
__assign_bit(bit, chip->state, val);
@@ -275,7 +275,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
__clear_bit(bit, chip->dir);
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -398,7 +398,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
int bit = xgpio_to_bit(chip, irq_offset);
u32 mask = BIT(bit / 32), temp;
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__clear_bit(bit, chip->enable);
@@ -408,7 +408,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
temp &= ~mask;
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
}
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
gpiochip_disable_irq(&chip->gc, irq_offset);
}
@@ -428,7 +428,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
gpiochip_enable_irq(&chip->gc, irq_offset);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__set_bit(bit, chip->enable);
@@ -447,7 +447,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
}
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -512,7 +512,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
chained_irq_enter(irqchip, desc);
- spin_lock(&chip->gpio_lock);
+ raw_spin_lock(&chip->gpio_lock);
xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all);
@@ -529,7 +529,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
bitmap_copy(chip->last_irq_read, all, 64);
bitmap_or(all, rising, falling, 64);
- spin_unlock(&chip->gpio_lock);
+ raw_spin_unlock(&chip->gpio_lock);
dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling);
@@ -620,7 +620,7 @@ static int xgpio_probe(struct platform_device *pdev)
bitmap_set(chip->hw_map, 0, width[0]);
bitmap_set(chip->hw_map, 32, width[1]);
- spin_lock_init(&chip->gpio_lock);
+ raw_spin_lock_init(&chip->gpio_lock);
chip->gc.base = -1;
chip->gc.ngpio = bitmap_weight(chip->hw_map, 64);
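
The gpio-xilinx conversion above replaces spinlock_t with raw_spinlock_t because, on PREEMPT_RT, spinlock_t becomes a sleeping lock and may not be taken from the hard-IRQ paths of an irqchip (mask/unmask and the chained handler). A minimal sketch of the raw-lock usage, with a hypothetical enable register standing in for the driver's accessors:

#include <linux/spinlock.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

struct example_chip {
	raw_spinlock_t lock;	/* never sleeps, even on PREEMPT_RT */
	void __iomem *regs;
};

static void example_irq_mask(struct example_chip *chip, u32 bit)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&chip->lock, flags);
	/* Hypothetical interrupt-enable register at offset 0x28. */
	val = readl(chip->regs + 0x28);
	writel(val & ~BIT(bit), chip->regs + 0x28);
	raw_spin_unlock_irqrestore(&chip->lock, flags);
}
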
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 3afcd1e8aa54..c4e733c2e75e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -715,8 +715,9 @@ err:
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
- if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
- ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
+ if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+ ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) ||
+ (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 12)) {
pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
amdgpu_gfx_off_ctrl(adev, idle);
} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 2d4b67175b55..328a1b963548 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -122,6 +122,10 @@ static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return 0;
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 2) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 3))
+ return 0;
+
if (adev->asic_type >= CHIP_SIENNA_CICHLID)
return 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 69a6b6dba0a5..1d155463d044 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1989,6 +1989,7 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -2007,15 +2008,19 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
mutex_lock(&adev->enforce_isolation_mutex);
if (adev->enforce_isolation[idx]) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
}
void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -2031,9 +2036,12 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
mutex_lock(&adev->enforce_isolation_mutex);
if (adev->enforce_isolation[idx]) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
}
/*
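
The amdgpu_gfx hunks above record the decision under enforce_isolation_mutex but call amdgpu_gfx_kfd_sch_ctrl() only after unlocking, so that function's own locking no longer nests inside the mutex. A generic sketch of the decide-under-lock, act-after-unlock pattern (all names illustrative):

#include <linux/mutex.h>
#include <linux/types.h>

static void example_begin_use(struct mutex *lock, bool enforce, bool ready,
			      void (*sch_ctrl)(bool enable))
{
	bool sched_work = false;

	mutex_lock(lock);
	if (enforce && ready)
		sched_work = true;	/* only record the decision here */
	mutex_unlock(lock);

	if (sched_work)
		sch_ctrl(false);	/* heavier call runs lock-free */
}
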
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 8b512dc28df8..071f187f5e28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -193,8 +193,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
need_ctx_switch = ring->current_ctx != fence_ctx;
if (ring->funcs->emit_pipeline_sync && job &&
((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
- (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
- amdgpu_vm_need_pipeline_sync(ring, job))) {
+ need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {
+
need_pipe_sync = true;
if (tmp)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 7d26a962f811..ff5e52025266 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -567,7 +567,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
else
remaining_size -= size;
}
- mutex_unlock(&mgr->lock);
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
struct drm_buddy_block *dcc_block;
@@ -584,6 +583,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
(u64)vres->base.size,
&vres->blocks);
}
+ mutex_unlock(&mgr->lock);
vres->base.start = 0;
size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
index 312dfa84f29f..a8abc3091801 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
@@ -350,10 +350,27 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en)
{
uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
uint32_t flags = pdd->process->dbg_flags;
+ struct amdgpu_device *adev = pdd->dev->adev;
+ int r;
if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
return 0;
+ if (!pdd->proc_ctx_cpu_ptr) {
+ r = amdgpu_amdkfd_alloc_gtt_mem(adev,
+ AMDGPU_MES_PROC_CTX_SIZE,
+ &pdd->proc_ctx_bo,
+ &pdd->proc_ctx_gpu_addr,
+ &pdd->proc_ctx_cpu_ptr,
+ false);
+ if (r) {
+ dev_err(adev->dev,
+ "failed to allocate process context bo\n");
+ return r;
+ }
+ memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
+ }
+
return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl,
pdd->watch_points, flags, sq_trap_en);
}
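
The kfd_debug hunk allocates the MES process-context buffer lazily: if proc_ctx_cpu_ptr is still NULL, the BO is created and zeroed before the debugger call that needs its GPU address. A stripped-down sketch of the allocate-once idiom, with the real amdgpu allocator replaced by a hypothetical callback:

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative context; the real structure lives in kfd_priv.h. */
struct example_pdd {
	void *proc_ctx_cpu_ptr;
	u64 proc_ctx_gpu_addr;
	size_t ctx_size;
};

static int example_ensure_proc_ctx(struct example_pdd *pdd,
				   int (*alloc)(struct example_pdd *pdd))
{
	int r;

	if (pdd->proc_ctx_cpu_ptr)
		return 0;	/* already allocated by an earlier call */

	r = alloc(pdd);		/* stand-in for amdgpu_amdkfd_alloc_gtt_mem() */
	if (r)
		return r;

	/* MES expects a zeroed context on first use. */
	memset(pdd->proc_ctx_cpu_ptr, 0, pdd->ctx_size);
	return 0;
}
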
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index d0ee173acf82..edfe0b4788f4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1160,7 +1160,8 @@ static void kfd_process_wq_release(struct work_struct *work)
*/
synchronize_rcu();
ef = rcu_access_pointer(p->ef);
- dma_fence_signal(ef);
+ if (ef)
+ dma_fence_signal(ef);
kfd_process_remove_sysfs(p);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 48be917e7bc5..5f216d626cbb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8400,16 +8400,6 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dm_crtc_state *acrtc_state)
{
- /*
- * We have no guarantee that the frontend index maps to the same
- * backend index - some even map to more than one.
- *
- * TODO: Use a different interrupt or check DC itself for the mapping.
- */
- int irq_type =
- amdgpu_display_crtc_idx_to_irq_type(
- adev,
- acrtc->crtc_id);
struct drm_vblank_crtc_config config = {0};
struct dc_crtc_timing *timing;
int offdelay;
@@ -8435,28 +8425,7 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
drm_crtc_vblank_on_config(&acrtc->base,
&config);
-
- amdgpu_irq_get(
- adev,
- &adev->pageflip_irq,
- irq_type);
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- amdgpu_irq_get(
- adev,
- &adev->vline0_irq,
- irq_type);
-#endif
} else {
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- amdgpu_irq_put(
- adev,
- &adev->vline0_irq,
- irq_type);
-#endif
- amdgpu_irq_put(
- adev,
- &adev->pageflip_irq,
- irq_type);
drm_crtc_vblank_off(&acrtc->base);
}
}
@@ -8927,6 +8896,7 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
struct amdgpu_dm_connector *aconn =
(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
if (pr->config.replay_supported && !pr->replay_feature_enabled)
@@ -8953,14 +8923,15 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
* adequate number of fast atomic commits to notify KMD
* of update events. See `vblank_control_worker()`.
*/
- if (acrtc_attach->dm_irq_params.allow_sr_entry &&
+ if (!vrr_active &&
+ acrtc_attach->dm_irq_params.allow_sr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
(current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
if (pr->replay_feature_enabled && !pr->replay_allow_active)
amdgpu_dm_replay_enable(acrtc_state->stream, true);
- if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
+ if (psr->psr_version == DC_PSR_VERSION_SU_1 &&
!psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
amdgpu_dm_psr_enable(acrtc_state->stream);
}
@@ -9131,7 +9102,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
timestamp_ns;
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
mutex_unlock(&dm->dc_lock);
}
}
@@ -9297,11 +9268,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
- if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) {
if (acrtc_state->stream->link->replay_settings.replay_allow_active)
amdgpu_dm_replay_disable(acrtc_state->stream);
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
}
mutex_unlock(&dm->dc_lock);
@@ -11155,8 +11126,8 @@ dm_get_plane_scale(struct drm_plane_state *plane_state,
int plane_src_w, plane_src_h;
dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
- *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
- *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
+ *out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0;
+ *out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0;
}
/*
@@ -11410,6 +11381,25 @@ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
return 0;
}
+static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state, *old_plane_state;
+
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+ new_plane_state = drm_atomic_get_plane_state(state, plane);
+ old_plane_state = drm_atomic_get_plane_state(state, plane);
+
+ if (old_plane_state->fb && new_plane_state->fb &&
+ get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
+ return true;
+ }
+
+ return false;
+}
+
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
*
@@ -11607,10 +11597,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
- if (old_plane_state->fb && new_plane_state->fb &&
- get_mem_type(old_plane_state->fb) !=
- get_mem_type(new_plane_state->fb))
- lock_and_validation_needed = true;
ret = dm_update_plane_state(dc, state, plane,
old_plane_state,
@@ -11905,9 +11891,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/*
* Only allow async flips for fast updates that don't change
- * the FB pitch, the DCC state, rotation, etc.
+ * the FB pitch, the DCC state, rotation, mem_type, etc.
*/
- if (new_crtc_state->async_flip && lock_and_validation_needed) {
+ if (new_crtc_state->async_flip &&
+ (lock_and_validation_needed ||
+ amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6464a8378387..2227cd8e4a89 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -697,6 +697,8 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *mst_output_port;
struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
+ uint32_t mst_local_bw;
+ uint16_t vc_full_pbn;
struct mutex handle_mst_msg_ready;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index f936a35fa9eb..0f6ba7b1575d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -30,6 +30,7 @@
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"
+#include "amdgpu_dm_psr.h"
static const char *const pipe_crc_sources[] = {
"none",
@@ -224,6 +225,10 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
+ /* For PSR1, check that the panel has exited PSR */
+ if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+ amdgpu_dm_psr_wait_disable(stream_state);
+
/* Enable or disable CRTC CRC generation */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
if (!dc_stream_configure_crc(stream_state->ctx->dc,
@@ -357,6 +362,17 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
}
+ /*
+ * Reading the CRC requires the vblank interrupt handler to be
+ * enabled. Keep a reference until CRC capture stops.
+ */
+ enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
+ if (!enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ goto cleanup;
+ }
+
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Reset secure_display when we change crc source from debugfs */
amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
@@ -367,16 +383,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
goto cleanup;
}
- /*
- * Reading the CRC requires the vblank interrupt handler to be
- * enabled. Keep a reference until CRC capture stops.
- */
- enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
if (!enabled && enable) {
- ret = drm_crtc_vblank_get(crtc);
- if (ret)
- goto cleanup;
-
if (dm_is_crc_source_dprx(source)) {
if (drm_dp_start_crc(aux, crtc)) {
DRM_DEBUG_DRIVER("dp start crc failed\n");
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 64a041c2af05..36a830a7440f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -93,7 +93,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
return rc;
}
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
{
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
@@ -142,7 +142,7 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
amdgpu_dm_replay_enable(vblank_work->stream, true);
} else if (vblank_enabled) {
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
- amdgpu_dm_psr_disable(vblank_work->stream);
+ amdgpu_dm_psr_disable(vblank_work->stream, false);
} else if (link->psr_settings.psr_feature_enabled &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
index 17e948753f59..c1212947a77b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
@@ -37,7 +37,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc);
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state);
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state);
int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 6a97bb2d9160..8b7c1a428f64 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3606,7 +3606,7 @@ static int crc_win_update_set(void *data, u64 val)
/* PSR may write to OTG CRC window control register,
* so close it before starting secure_display.
*/
- amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream);
+ amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream, true);
spin_lock_irq(&adev_to_drm(adev)->event_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 6e4359490613..1080075ccb17 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -155,6 +155,17 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
return 0;
}
+
+static inline void
+amdgpu_dm_mst_reset_mst_connector_setting(struct amdgpu_dm_connector *aconnector)
+{
+ aconnector->drm_edid = NULL;
+ aconnector->dsc_aux = NULL;
+ aconnector->mst_output_port->passthrough_aux = NULL;
+ aconnector->mst_local_bw = 0;
+ aconnector->vc_full_pbn = 0;
+}
+
static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
@@ -182,9 +193,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
- aconnector->drm_edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
}
aconnector->mst_status = MST_STATUS_DEFAULT;
@@ -504,9 +513,7 @@ dm_dp_mst_detect(struct drm_connector *connector,
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
- aconnector->drm_edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
@@ -1819,9 +1826,18 @@ enum dc_status dm_dp_mst_is_port_support_mode(
struct drm_dp_mst_port *immediate_upstream_port = NULL;
uint32_t end_link_bw = 0;
- /*Get last DP link BW capability*/
- if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) {
- if (stream_kbps > end_link_bw) {
+ /*Get last DP link BW capability. Mode shall be supported by Legacy peer*/
+ if (aconnector->mst_output_port->pdt != DP_PEER_DEVICE_DP_LEGACY_CONV &&
+ aconnector->mst_output_port->pdt != DP_PEER_DEVICE_NONE) {
+ if (aconnector->vc_full_pbn != aconnector->mst_output_port->full_pbn) {
+ dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw);
+ aconnector->vc_full_pbn = aconnector->mst_output_port->full_pbn;
+ aconnector->mst_local_bw = end_link_bw;
+ } else {
+ end_link_bw = aconnector->mst_local_bw;
+ }
+
+ if (end_link_bw > 0 && stream_kbps > end_link_bw) {
DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
"Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
@@ -1835,11 +1851,15 @@ enum dc_status dm_dp_mst_is_port_support_mode(
if (immediate_upstream_port) {
virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
- if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
- "Max dsc compression can't fit into MST available bw\n");
- return DC_FAIL_BANDWIDTH_VALIDATE;
- }
+ } else {
+ /* For topology LCT 1 case - only one mstb*/
+ virtual_channel_bw_in_kbps = root_link_bw_in_kbps;
+ }
+
+ if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ "Max dsc compression can't fit into MST available bw\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index f40240aafe98..45858bf1523d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -201,14 +201,13 @@ void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*
* Return: true if success
*/
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait)
{
- unsigned int power_opt = 0;
bool psr_enable = false;
DRM_DEBUG_DRIVER("Disabling psr...\n");
- return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt);
+ return dc_link_set_psr_allow_active(stream->link, &psr_enable, wait, false, NULL);
}
/*
@@ -251,3 +250,33 @@ bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm)
return allow_active;
}
+
+/**
+ * amdgpu_dm_psr_wait_disable() - Wait for eDP panel to exit PSR
+ * @stream: stream state attached to the eDP link
+ *
+ * Waits for a max of 500ms for the eDP panel to exit PSR.
+ *
+ * Return: true if panel exited PSR, false otherwise.
+ */
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream)
+{
+ enum dc_psr_state psr_state = PSR_STATE0;
+ struct dc_link *link = stream->link;
+ int retry_count;
+
+ if (link == NULL)
+ return false;
+
+ for (retry_count = 0; retry_count < 1000; retry_count++) {
+ dc_link_get_psr_state(link, &psr_state);
+ if (psr_state == PSR_STATE0)
+ break;
+ udelay(500);
+ }
+
+ if (retry_count == 1000)
+ return false;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index cd2d45c2b5ef..e2366321a3c1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -34,8 +34,9 @@
void amdgpu_dm_set_psr_caps(struct dc_link *link);
void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm);
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream);
#endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 49fe7dcf9372..2723558049d6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -4510,7 +4510,7 @@ static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
struct pipe_split_policy_backup policy;
struct dc_state *intermediate_context;
struct dc_state *old_current_state = dc->current_state;
- struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
+ struct dc_surface_update srf_updates[MAX_SURFACES] = {0};
int surface_count;
/*
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index e006f816ff2f..1b2cce127981 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -483,9 +483,9 @@ bool dc_state_add_plane(
if (stream_status == NULL) {
dm_error("Existing stream not found; failed to attach surface!\n");
goto out;
- } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
+ } else if (stream_status->plane_count == MAX_SURFACES) {
dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
- plane_state, MAX_SURFACE_NUM);
+ plane_state, MAX_SURFACES);
goto out;
} else if (!otg_master_pipe) {
goto out;
@@ -600,7 +600,7 @@ bool dc_state_rem_all_planes_for_stream(
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < state->stream_count; i++)
if (state->streams[i] == stream) {
@@ -875,7 +875,7 @@ bool dc_state_rem_all_phantom_planes_for_stream(
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < state->stream_count; i++)
if (state->streams[i] == phantom_stream) {
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index e9b9126c0401..08c5a315b3a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -57,7 +57,7 @@ struct dmub_notification;
#define DC_VER "3.2.310"
-#define MAX_SURFACES 3
+#define MAX_SURFACES 4
#define MAX_PLANES 6
#define MAX_STREAMS 6
#define MIN_VIEWPORT_SIZE 12
@@ -1398,7 +1398,7 @@ struct dc_scratch_space {
* store current value in plane states so we can still recover
* a valid current state during dc update.
*/
- struct dc_plane_state plane_states[MAX_SURFACE_NUM];
+ struct dc_plane_state plane_states[MAX_SURFACES];
struct dc_stream_state stream_state;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 413970588a26..860506c6bda4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -56,7 +56,7 @@ struct dc_stream_status {
int plane_count;
int audio_inst;
struct timing_sync_info timing_sync_info;
- struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
+ struct dc_plane_state *plane_states[MAX_SURFACES];
bool is_abm_supported;
struct mall_stream_config mall_stream_config;
bool fpo_in_use;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index edf4df1d03b5..9466b63644d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -76,7 +76,6 @@ struct dc_perf_trace {
unsigned long last_entry_write;
};
-#define MAX_SURFACE_NUM 6
#define NUM_PIXEL_FORMATS 10
enum tiling_mode {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index bf636b28e3e1..5bb8b78bf250 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -63,7 +63,8 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
bool should_use_dmub_lock(struct dc_link *link)
{
- if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_1)
return true;
if (link->replay_settings.replay_feature_enabled)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index beed7adbbd43..47d785204f29 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = 1,
+ .do_urgent_latency_adjustment = 0,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index 072bd0539605..6b2ab4ec2b5f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -66,11 +66,15 @@ static inline double dml_max5(double a, double b, double c, double d, double e)
static inline double dml_ceil(double a, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_ceil2(a, granularity);
}
static inline double dml_floor(double a, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_floor2(a, granularity);
}
@@ -114,11 +118,15 @@ static inline double dml_ceil_2(double f)
static inline double dml_ceil_ex(double x, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_ceil2(x, granularity);
}
static inline double dml_floor_ex(double x, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_floor2(x, granularity);
}
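
The dml_inline_defs hunks guard the ceil/floor helpers against a zero granularity; the underlying dcn_bw_ceil2()/dcn_bw_floor2() round to a multiple of the granularity and would otherwise divide by zero. A plain-C sketch of the guarded rounding (not the DML implementation itself):

#include <math.h>

static double example_floor_to_granularity(double a, double granularity)
{
	if (granularity == 0)
		return 0;	/* mirror the patch: avoid the division below */

	return floor(a / granularity) * granularity;
}
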
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 601320b1be81..ecfa3c898e09 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -579,8 +579,8 @@ static void CalculateBytePerPixelAndBlockSizes(
{
*BytePerPixelDETY = 0;
*BytePerPixelDETC = 0;
- *BytePerPixelY = 0;
- *BytePerPixelC = 0;
+ *BytePerPixelY = 1;
+ *BytePerPixelC = 1;
if (SourcePixelFormat == dml2_444_64) {
*BytePerPixelDETY = 8;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
index 3d29169dd6bb..6b3b8803e0ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
@@ -813,7 +813,7 @@ static bool remove_all_phantom_planes_for_stream(struct dml2_context *ctx, struc
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index fa11f075d1f9..d9136d252ee1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1398,12 +1398,12 @@ void dcn32_disable_link_output(struct dc_link *link,
link_hwss->disable_link_output(link, link_res, signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
-
- if (signal == SIGNAL_TYPE_EDP &&
- link->dc->hwss.edp_power_control &&
- !link->skip_implict_edp_power_control)
- link->dc->hwss.edp_power_control(link, false);
- else if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ /*
+ * Add the logic to extract BOTH power up and power down sequences
+ * from enable/disable link output and only call edp panel control
+ * in enable_link_dp and disable_link_dp once.
+ */
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index ae3563d71fa0..356d9422b411 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -303,5 +303,7 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value);
+
+void smu_v13_0_interrupt_work(struct smu_context *smu);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 2bfea740dace..2d1e7ebd1bac 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -1320,11 +1320,11 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
return 0;
}
-static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu)
+void smu_v13_0_interrupt_work(struct smu_context *smu)
{
- return smu_cmn_send_smc_msg(smu,
- SMU_MSG_ReenableAcDcInterrupt,
- NULL);
+ smu_cmn_send_smc_msg(smu,
+ SMU_MSG_ReenableAcDcInterrupt,
+ NULL);
}
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
@@ -1377,12 +1377,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
switch (ctxid) {
case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
dev_dbg(adev->dev, "Switched to AC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(smu);
+ schedule_work(&smu->interrupt_work);
adev->pm.ac_power = true;
break;
case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
dev_dbg(adev->dev, "Switched to DC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(smu);
+ schedule_work(&smu->interrupt_work);
adev->pm.ac_power = false;
break;
case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
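
The smu_v13_0 hunks stop sending the AC/DC acknowledgement message directly from the interrupt handler; the handler now only schedules smu->interrupt_work and the message is sent from process context. A minimal sketch of that handler-to-workqueue handoff, with hypothetical names and INIT_WORK() assumed to run at init time:

#include <linux/workqueue.h>
#include <linux/interrupt.h>

struct example_smu {
	struct work_struct interrupt_work;
};

static void example_interrupt_work(struct work_struct *work)
{
	struct example_smu *smu =
		container_of(work, struct example_smu, interrupt_work);

	/* Slow path: talk to the firmware outside IRQ context. */
	(void)smu;
}

static irqreturn_t example_irq(int irq, void *data)
{
	struct example_smu *smu = data;

	/* INIT_WORK(&smu->interrupt_work, example_interrupt_work) at probe. */
	schedule_work(&smu->interrupt_work);	/* fast, never blocks */
	return IRQ_HANDLED;
}
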
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 3aa705aae4c0..0551a3311217 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2643,11 +2643,12 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
&backend_workload_mask);
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
- if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
- ((smu->adev->pm.fw_version == 0x004e6601) ||
- (smu->adev->pm.fw_version >= 0x004e7300))) ||
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
- smu->adev->pm.fw_version >= 0x00504500)) {
+ if ((workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE)) &&
+ ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7300))) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ smu->adev->pm.fw_version >= 0x00504500))) {
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
PP_SMC_POWER_PROFILE_POWERSAVING);
@@ -3219,6 +3220,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check,
.enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
.set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
+ .interrupt_work = smu_v13_0_interrupt_work,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index aabb94796005..55ef18517b0f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2797,6 +2797,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check,
.enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
.set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
+ .interrupt_work = smu_v13_0_interrupt_work,
};
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index cbabd4e20d3e..5f138a5692c7 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -845,8 +845,8 @@ static int it6263_probe(struct i2c_client *client)
it->lvds_i2c = devm_i2c_new_dummy_device(dev, client->adapter,
LVDS_INPUT_CTRL_I2C_ADDR);
if (IS_ERR(it->lvds_i2c))
- dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
- "failed to allocate I2C device for LVDS\n");
+ return dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
+ "failed to allocate I2C device for LVDS\n");
it->lvds_regmap = devm_regmap_init_i2c(it->lvds_i2c,
&it6263_lvds_regmap_config);
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 320c297008aa..3a5a4f92c979 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -459,7 +459,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return ERR_PTR(-EINVAL);
- if (bridge_connector->bridge_hdmi)
+ if (bridge_connector->bridge_hdmi) {
+ if (!connector->ycbcr_420_allowed)
+ supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420);
+
ret = drmm_connector_hdmi_init(drm, connector,
bridge_connector->bridge_hdmi->vendor,
bridge_connector->bridge_hdmi->product,
@@ -468,10 +471,11 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
connector_type, ddc,
supported_formats,
max_bpc);
- else
+ } else {
ret = drmm_connector_init(drm, connector,
&drm_bridge_connector_funcs,
connector_type, ddc);
+ }
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index c6af46dd02bf..241a384ebce3 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -207,6 +207,10 @@ void drm_bridge_add(struct drm_bridge *bridge)
{
mutex_init(&bridge->hpd_mutex);
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI)
+ bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
+ BIT(HDMI_COLORSPACE_YUV420));
+
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index fc35f47e2849..ca7f43c8d6f1 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -507,6 +507,9 @@ int drmm_connector_hdmi_init(struct drm_device *dev,
if (!supported_formats || !(supported_formats & BIT(HDMI_COLORSPACE_RGB)))
return -EINVAL;
+ if (connector->ycbcr_420_allowed != !!(supported_formats & BIT(HDMI_COLORSPACE_YUV420)))
+ return -EINVAL;
+
if (!(max_bpc == 8 || max_bpc == 10 || max_bpc == 12))
return -EINVAL;
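
The drm_bridge/drm_connector hunks keep ycbcr_420_allowed and the YUV420 bit of supported_formats in lockstep: the bridge derives the flag from its format mask, the bridge-connector helper masks YUV420 out when the connector does not allow it, and drmm_connector_hdmi_init() now rejects a mismatch. A compact sketch of the consistency rule being enforced:

#include <linux/bits.h>
#include <linux/hdmi.h>
#include <linux/types.h>

static bool example_formats_consistent(unsigned long supported_formats,
				       bool ycbcr_420_allowed)
{
	bool has_420 = supported_formats & BIT(HDMI_COLORSPACE_YUV420);

	/* Either both sides advertise 4:2:0 support or neither does. */
	return has_420 == ycbcr_420_allowed;
}
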
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 6a7060889f40..223c4218c019 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1694,7 +1694,7 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
* arithmetic related to alignment and offset calculation.
*/
if (is_gen12_ccs_cc_plane(&fb->base, i)) {
- if (IS_ALIGNED(fb->base.offsets[i], PAGE_SIZE))
+ if (IS_ALIGNED(fb->base.offsets[i], 64))
continue;
else
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index f57e4dba2873..8fee26d791f4 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -1158,9 +1158,15 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- intel_hdcp_update_value(connector,
- DRM_MODE_CONTENT_PROTECTION_DESIRED,
- true);
+ ret = intel_hdcp1_enable(connector);
+ if (ret) {
+ drm_err(display->drm, "Failed to enable hdcp (%d)\n", ret);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
+ goto out;
+ }
+
out:
mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index f496e6cfdfe0..e47debd60619 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -14,9 +14,6 @@ config DRM_MEDIATEK
select DRM_BRIDGE_CONNECTOR
select DRM_MIPI_DSI
select DRM_PANEL
- select MEMORY
- select MTK_SMI
- select PHY_MTK_MIPI_DSI
select VIDEOMODE_HELPERS
help
Choose this option if you have a Mediatek SoCs.
@@ -27,7 +24,6 @@ config DRM_MEDIATEK
config DRM_MEDIATEK_DP
tristate "DRM DPTX Support for MediaTek SoCs"
depends on DRM_MEDIATEK
- select PHY_MTK_DP
select DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_DP_AUX_BUS
@@ -38,6 +34,5 @@ config DRM_MEDIATEK_HDMI
tristate "DRM HDMI Support for Mediatek SoCs"
depends on DRM_MEDIATEK
select SND_SOC_HDMI_CODEC if SND_SOC
- select PHY_MTK_HDMI
help
DRM/KMS HDMI driver for Mediatek SoCs
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index eb0e1233ad04..5674f5707cca 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -112,6 +112,11 @@ static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc)
drm_crtc_handle_vblank(&mtk_crtc->base);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (mtk_crtc->cmdq_client.chan)
+ return;
+#endif
+
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
mtk_crtc_finish_page_flip(mtk_crtc);
@@ -284,10 +289,8 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
state = to_mtk_crtc_state(mtk_crtc->base.state);
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
- if (mtk_crtc->config_updating) {
- spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+ if (mtk_crtc->config_updating)
goto ddp_cmdq_cb_out;
- }
state->pending_config = false;
@@ -315,10 +318,15 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
mtk_crtc->pending_async_planes = false;
}
- spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
-
ddp_cmdq_cb_out:
+ if (mtk_crtc->pending_needs_vblank) {
+ mtk_crtc_finish_page_flip(mtk_crtc);
+ mtk_crtc->pending_needs_vblank = false;
+ }
+
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
mtk_crtc->cmdq_vblank_cnt = 0;
wake_up(&mtk_crtc->cb_blocking_queue);
}
@@ -606,13 +614,18 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
*/
mtk_crtc->cmdq_vblank_cnt = 3;
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ mtk_crtc->config_updating = false;
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
}
-#endif
+#else
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
mtk_crtc->config_updating = false;
spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+#endif
mutex_unlock(&mtk_crtc->hw_lock);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index f731d4fbe8b6..df82cea4bb79 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -460,6 +460,29 @@ static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl,
}
}
+static void mtk_ovl_afbc_layer_config(struct mtk_disp_ovl *ovl,
+ unsigned int idx,
+ struct mtk_plane_pending_state *pending,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ unsigned int pitch_msb = pending->pitch >> 16;
+ unsigned int hdr_pitch = pending->hdr_pitch;
+ unsigned int hdr_addr = pending->hdr_addr;
+
+ if (pending->modifier != DRM_FORMAT_MOD_LINEAR) {
+ mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
+ DISP_REG_OVL_HDR_ADDR(ovl, idx));
+ mtk_ddp_write_relaxed(cmdq_pkt,
+ OVL_PITCH_MSB_2ND_SUBBUF | pitch_msb,
+ &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
+ DISP_REG_OVL_HDR_PITCH(ovl, idx));
+ } else {
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch_msb,
+ &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
+ }
+}
+
void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt)
@@ -467,25 +490,14 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int addr = pending->addr;
- unsigned int hdr_addr = pending->hdr_addr;
- unsigned int pitch = pending->pitch;
- unsigned int hdr_pitch = pending->hdr_pitch;
+ unsigned int pitch_lsb = pending->pitch & GENMASK(15, 0);
unsigned int fmt = pending->format;
+ unsigned int rotation = pending->rotation;
unsigned int offset = (pending->y << 16) | pending->x;
unsigned int src_size = (pending->height << 16) | pending->width;
unsigned int blend_mode = state->base.pixel_blend_mode;
unsigned int ignore_pixel_alpha = 0;
unsigned int con;
- bool is_afbc = pending->modifier != DRM_FORMAT_MOD_LINEAR;
- union overlay_pitch {
- struct split_pitch {
- u16 lsb;
- u16 msb;
- } split_pitch;
- u32 pitch;
- } overlay_pitch;
-
- overlay_pitch.pitch = pitch;
if (!pending->enable) {
mtk_ovl_layer_off(dev, idx, cmdq_pkt);
@@ -513,22 +525,30 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
ignore_pixel_alpha = OVL_CONST_BLEND;
}
- if (pending->rotation & DRM_MODE_REFLECT_Y) {
+ /*
+ * Treat rotate 180 as flip x + flip y, and XOR the original rotation value
+ * to flip x + flip y to support both in the same time.
+ */
+ if (rotation & DRM_MODE_ROTATE_180)
+ rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
+
+ if (rotation & DRM_MODE_REFLECT_Y) {
con |= OVL_CON_VIRT_FLIP;
addr += (pending->height - 1) * pending->pitch;
}
- if (pending->rotation & DRM_MODE_REFLECT_X) {
+ if (rotation & DRM_MODE_REFLECT_X) {
con |= OVL_CON_HORZ_FLIP;
addr += pending->pitch - 1;
}
if (ovl->data->supports_afbc)
- mtk_ovl_set_afbc(ovl, cmdq_pkt, idx, is_afbc);
+ mtk_ovl_set_afbc(ovl, cmdq_pkt, idx,
+ pending->modifier != DRM_FORMAT_MOD_LINEAR);
mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_CON(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, overlay_pitch.split_pitch.lsb | ignore_pixel_alpha,
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch_lsb | ignore_pixel_alpha,
&ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH(idx));
mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_SRC_SIZE(idx));
@@ -537,19 +557,8 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
mtk_ddp_write_relaxed(cmdq_pkt, addr, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_ADDR(ovl, idx));
- if (is_afbc) {
- mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
- DISP_REG_OVL_HDR_ADDR(ovl, idx));
- mtk_ddp_write_relaxed(cmdq_pkt,
- OVL_PITCH_MSB_2ND_SUBBUF | overlay_pitch.split_pitch.msb,
- &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
- DISP_REG_OVL_HDR_PITCH(ovl, idx));
- } else {
- mtk_ddp_write_relaxed(cmdq_pkt,
- overlay_pitch.split_pitch.msb,
- &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
- }
+ if (ovl->data->supports_afbc)
+ mtk_ovl_afbc_layer_config(ovl, idx, pending, cmdq_pkt);
mtk_ovl_set_bit_depth(dev, idx, fmt, cmdq_pkt);
mtk_ovl_layer_on(dev, idx, cmdq_pkt);
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 36713c176cfc..3b0993abfdc3 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -543,18 +543,16 @@ static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
enum dp_pixelformat color_format)
{
u32 val;
-
- /* update MISC0 */
- mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
- color_format << DP_TEST_COLOR_FORMAT_SHIFT,
- DP_TEST_COLOR_FORMAT_MASK);
+ u32 misc0_color;
switch (color_format) {
case DP_PIXELFORMAT_YUV422:
val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR422;
+ misc0_color = DP_COLOR_FORMAT_YCbCr422;
break;
case DP_PIXELFORMAT_RGB:
val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_RGB;
+ misc0_color = DP_COLOR_FORMAT_RGB;
break;
default:
drm_warn(mtk_dp->drm_dev, "Unsupported color format: %d\n",
@@ -562,6 +560,11 @@ static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
return -EINVAL;
}
+ /* update MISC0 */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
+ misc0_color,
+ DP_TEST_COLOR_FORMAT_MASK);
+
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
val, PIXEL_ENCODE_FORMAT_DP_ENC0_P0_MASK);
return 0;
@@ -2100,7 +2103,6 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
enum drm_connector_status ret = connector_status_disconnected;
bool enabled = mtk_dp->enabled;
- u8 sink_count = 0;
if (!mtk_dp->train_info.cable_plugged_in)
return ret;
@@ -2115,8 +2117,8 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
* function, we just need to check the HPD connection to check
* whether we connect to a sink device.
*/
- drm_dp_dpcd_readb(&mtk_dp->aux, DP_SINK_COUNT, &sink_count);
- if (DP_GET_SINK_COUNT(sink_count))
+
+ if (drm_dp_read_sink_count(&mtk_dp->aux) > 0)
ret = connector_status_connected;
if (!enabled)
@@ -2408,12 +2410,19 @@ mtk_dp_bridge_mode_valid(struct drm_bridge *bridge,
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
u32 bpp = info->color_formats & DRM_COLOR_FORMAT_YCBCR422 ? 16 : 24;
- u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
- drm_dp_max_lane_count(mtk_dp->rx_cap),
- drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
- mtk_dp->max_lanes);
+ u32 lane_count_min = mtk_dp->train_info.lane_count;
+ u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) *
+ lane_count_min;
- if (rate < mode->clock * bpp / 8)
+ /*
+ *FEC overhead is approximately 2.4% from DP 1.4a spec 2.2.1.4.2.
+ *The down-spread amplitude shall either be disabled (0.0%) or up
+ *to 0.5% from 1.4a 3.5.2.6. Add up to approximately 3% total overhead.
+ *
+ *Because rate is already divided by 10,
+ *mode->clock does not need to be multiplied by 10
+ */
+ if ((rate * 97 / 100) < (mode->clock * bpp / 8))
return MODE_CLOCK_HIGH;
return MODE_OK;
@@ -2454,10 +2463,9 @@ static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct drm_display_info *display_info =
&conn_state->connector->display_info;
- u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
- drm_dp_max_lane_count(mtk_dp->rx_cap),
- drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
- mtk_dp->max_lanes);
+ u32 lane_count_min = mtk_dp->train_info.lane_count;
+ u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) *
+ lane_count_min;
*num_input_fmts = 0;
@@ -2466,8 +2474,8 @@ static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
* datarate of YUV422 and sink device supports YUV422, we output YUV422
* format. Use this condition, we can support more resolution.
*/
- if ((rate < (mode->clock * 24 / 8)) &&
- (rate > (mode->clock * 16 / 8)) &&
+ if (((rate * 97 / 100) < (mode->clock * 24 / 8)) &&
+ ((rate * 97 / 100) > (mode->clock * 16 / 8)) &&
(display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422)) {
input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
if (!input_fmts)
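
The mtk_dp hunks derate the available link bandwidth by roughly 3% (about 2.4% FEC overhead plus up to 0.5% down-spread) before comparing it with the mode's payload rate. A simplified sketch of the comparison; the units follow the patch, where rate is already the per-lane byte rate times the lane count:

#include <linux/types.h>

static bool example_mode_fits_link(u32 rate, u32 mode_clock_khz, u32 bpp)
{
	/* Keep ~3% in reserve for FEC and down-spread overhead. */
	return (rate * 97 / 100) >= (mode_clock_khz * bpp / 8);
}
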
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 0829ceb9967c..b037616d3342 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -373,11 +373,12 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
struct mtk_drm_private *temp_drm_priv;
struct device_node *phandle = dev->parent->of_node;
const struct of_device_id *of_id;
+ struct device_node *node;
struct device *drm_dev;
unsigned int cnt = 0;
int i, j;
- for_each_child_of_node_scoped(phandle->parent, node) {
+ for_each_child_of_node(phandle->parent, node) {
struct platform_device *pdev;
of_id = of_match_node(mtk_drm_of_ids, node);
@@ -406,8 +407,10 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
if (temp_drm_priv->mtk_drm_bound)
cnt++;
- if (cnt == MAX_CRTC)
+ if (cnt == MAX_CRTC) {
+ of_node_put(node);
break;
+ }
}
if (drm_priv->data->mmsys_dev_num == cnt) {
@@ -673,6 +676,8 @@ err_deinit:
err_free:
private->drm = NULL;
drm_dev_put(drm);
+ for (i = 0; i < private->data->mmsys_dev_num; i++)
+ private->all_drm_private[i]->drm = NULL;
return ret;
}
@@ -900,7 +905,7 @@ static int mtk_drm_of_ddp_path_build_one(struct device *dev, enum mtk_crtc_path
const unsigned int **out_path,
unsigned int *out_path_len)
{
- struct device_node *next, *prev, *vdo = dev->parent->of_node;
+ struct device_node *next = NULL, *prev, *vdo = dev->parent->of_node;
unsigned int temp_path[DDP_COMPONENT_DRM_ID_MAX] = { 0 };
unsigned int *final_ddp_path;
unsigned short int idx = 0;
@@ -1089,7 +1094,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
/* No devicetree graphs support: go with hardcoded paths if present */
dev_dbg(dev, "Using hardcoded paths for MMSYS %u\n", mtk_drm_data->mmsys_id);
private->data = mtk_drm_data;
- };
+ }
private->all_drm_private = devm_kmalloc_array(dev, private->data->mmsys_dev_num,
sizeof(*private->all_drm_private),
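
The mtk_drm_drv hunk returns to for_each_child_of_node(), which holds a reference on the node being visited, so breaking out of the loop early requires the explicit of_node_put() the patch adds. A small sketch of the same reference rule, here transferring the reference to the caller instead of dropping it:

#include <linux/of.h>

static struct device_node *example_find_child(struct device_node *parent,
					      const char *name)
{
	struct device_node *node;

	for_each_child_of_node(parent, node) {
		if (of_node_name_eq(node, name)) {
			/*
			 * The iterator holds a reference on 'node'; keep it
			 * for the caller rather than dropping it here.
			 */
			return node;
		}
	}

	/* Loop exhausted: no reference is left dangling. */
	return NULL;
}
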
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index e61b9bc68e9a..40752f232054 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -139,11 +139,11 @@
#define CLK_HS_POST GENMASK(15, 8)
#define CLK_HS_EXIT GENMASK(23, 16)
-#define DSI_VM_CMD_CON 0x130
+/* DSI_VM_CMD_CON */
#define VM_CMD_EN BIT(0)
#define TS_VFP_EN BIT(5)
-#define DSI_SHADOW_DEBUG 0x190U
+/* DSI_SHADOW_DEBUG */
#define FORCE_COMMIT BIT(0)
#define BYPASS_SHADOW BIT(1)
@@ -187,6 +187,8 @@ struct phy;
struct mtk_dsi_driver_data {
const u32 reg_cmdq_off;
+ const u32 reg_vm_cmd_off;
+ const u32 reg_shadow_dbg_off;
bool has_shadow_ctl;
bool has_size_ctl;
bool cmdq_long_packet_ctl;
@@ -246,23 +248,22 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, HZ_PER_MHZ);
struct mtk_phy_timing *timing = &dsi->phy_timing;
- timing->lpx = (80 * data_rate_mhz / (8 * 1000)) + 1;
- timing->da_hs_prepare = (59 * data_rate_mhz + 4 * 1000) / 8000 + 1;
- timing->da_hs_zero = (163 * data_rate_mhz + 11 * 1000) / 8000 + 1 -
+ timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
+ timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
timing->da_hs_prepare;
- timing->da_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
+ timing->da_hs_trail = timing->da_hs_prepare + 1;
- timing->ta_go = 4 * timing->lpx;
- timing->ta_sure = 3 * timing->lpx / 2;
- timing->ta_get = 5 * timing->lpx;
- timing->da_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->ta_go = 4 * timing->lpx - 2;
+ timing->ta_sure = timing->lpx + 2;
+ timing->ta_get = 4 * timing->lpx;
+ timing->da_hs_exit = 2 * timing->lpx + 1;
- timing->clk_hs_prepare = (57 * data_rate_mhz / (8 * 1000)) + 1;
- timing->clk_hs_post = (65 * data_rate_mhz + 53 * 1000) / 8000 + 1;
- timing->clk_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
- timing->clk_hs_zero = (330 * data_rate_mhz / (8 * 1000)) + 1 -
- timing->clk_hs_prepare;
- timing->clk_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
+ timing->clk_hs_post = timing->clk_hs_prepare + 8;
+ timing->clk_hs_trail = timing->clk_hs_prepare;
+ timing->clk_hs_zero = timing->clk_hs_trail * 4;
+ timing->clk_hs_exit = 2 * timing->clk_hs_trail;
timcon0 = FIELD_PREP(LPX, timing->lpx) |
FIELD_PREP(HS_PREP, timing->da_hs_prepare) |
@@ -367,8 +368,8 @@ static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
{
- mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
- mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
+ mtk_dsi_mask(dsi, dsi->driver_data->reg_vm_cmd_off, VM_CMD_EN, VM_CMD_EN);
+ mtk_dsi_mask(dsi, dsi->driver_data->reg_vm_cmd_off, TS_VFP_EN, TS_VFP_EN);
}
static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
@@ -714,7 +715,7 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
if (dsi->driver_data->has_shadow_ctl)
writel(FORCE_COMMIT | BYPASS_SHADOW,
- dsi->regs + DSI_SHADOW_DEBUG);
+ dsi->regs + dsi->driver_data->reg_shadow_dbg_off);
mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);
@@ -1263,26 +1264,36 @@ static void mtk_dsi_remove(struct platform_device *pdev)
static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
.reg_cmdq_off = 0x200,
+ .reg_vm_cmd_off = 0x130,
+ .reg_shadow_dbg_off = 0x190
};
static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
.reg_cmdq_off = 0x180,
+ .reg_vm_cmd_off = 0x130,
+ .reg_shadow_dbg_off = 0x190
};
static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
.reg_cmdq_off = 0x200,
+ .reg_vm_cmd_off = 0x130,
+ .reg_shadow_dbg_off = 0x190,
.has_shadow_ctl = true,
.has_size_ctl = true,
};
static const struct mtk_dsi_driver_data mt8186_dsi_driver_data = {
.reg_cmdq_off = 0xd00,
+ .reg_vm_cmd_off = 0x200,
+ .reg_shadow_dbg_off = 0xc00,
.has_shadow_ctl = true,
.has_size_ctl = true,
};
static const struct mtk_dsi_driver_data mt8188_dsi_driver_data = {
.reg_cmdq_off = 0xd00,
+ .reg_vm_cmd_off = 0x200,
+ .reg_shadow_dbg_off = 0xc00,
.has_shadow_ctl = true,
.has_size_ctl = true,
.cmdq_long_packet_ctl = true,
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 8f0c69aad248..21b56cc7605c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -384,7 +384,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
if (ret < 0)
return NULL;
- return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+ return edid;
}
bool nouveau_acpi_video_backlight_use_native(void)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 09686d038d60..7cc84472cece 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -387,11 +387,13 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
if (f) {
struct nouveau_channel *prev;
bool must_wait = true;
+ bool local;
rcu_read_lock();
prev = rcu_dereference(f->channel);
- if (prev && (prev == chan ||
- fctx->sync(f, prev, chan) == 0))
+ local = prev && prev->cli->drm == chan->cli->drm;
+ if (local && (prev == chan ||
+ fctx->sync(f, prev, chan) == 0))
must_wait = false;
rcu_read_unlock();
if (!must_wait)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index 841e3b69fcaf..5a0c9b8a79f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -31,6 +31,7 @@ mcp77_sor = {
.state = g94_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
+ .bl = &nv50_sor_bl,
.hdmi = &g84_sor_hdmi,
.dp = &g94_sor_dp,
};
diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c
index 6bba97d0be88..3d48d676e001 100644
--- a/drivers/gpu/drm/tests/drm_connector_test.c
+++ b/drivers/gpu/drm/tests/drm_connector_test.c
@@ -635,6 +635,64 @@ static void drm_test_connector_hdmi_init_formats_no_rgb(struct kunit *test)
KUNIT_EXPECT_LT(test, ret, 0);
}
+struct drm_connector_hdmi_init_formats_yuv420_allowed_test {
+ unsigned long supported_formats;
+ bool yuv420_allowed;
+ int expected_result;
+};
+
+#define YUV420_ALLOWED_TEST(_formats, _allowed, _result) \
+ { \
+ .supported_formats = BIT(HDMI_COLORSPACE_RGB) | (_formats), \
+ .yuv420_allowed = _allowed, \
+ .expected_result = _result, \
+ }
+
+static const struct drm_connector_hdmi_init_formats_yuv420_allowed_test
+drm_connector_hdmi_init_formats_yuv420_allowed_tests[] = {
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV420), true, 0),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV420), false, -EINVAL),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV422), true, -EINVAL),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV422), false, 0),
+};
+
+static void
+drm_connector_hdmi_init_formats_yuv420_allowed_desc(const struct drm_connector_hdmi_init_formats_yuv420_allowed_test *t,
+ char *desc)
+{
+ sprintf(desc, "supported_formats=0x%lx yuv420_allowed=%d",
+ t->supported_formats, t->yuv420_allowed);
+}
+
+KUNIT_ARRAY_PARAM(drm_connector_hdmi_init_formats_yuv420_allowed,
+ drm_connector_hdmi_init_formats_yuv420_allowed_tests,
+ drm_connector_hdmi_init_formats_yuv420_allowed_desc);
+
+/*
+ * Test that the registration of an HDMI connector succeeds only when
+ * the presence of YUV420 in the supported formats matches the value
+ * of the ycbcr_420_allowed flag.
+ */
+static void drm_test_connector_hdmi_init_formats_yuv420_allowed(struct kunit *test)
+{
+ const struct drm_connector_hdmi_init_formats_yuv420_allowed_test *params;
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ params = test->param_value;
+ priv->connector.ycbcr_420_allowed = params->yuv420_allowed;
+
+ ret = drmm_connector_hdmi_init(&priv->drm, &priv->connector,
+ "Vendor", "Product",
+ &dummy_funcs,
+ &dummy_hdmi_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ &priv->ddc,
+ params->supported_formats,
+ 8);
+ KUNIT_EXPECT_EQ(test, ret, params->expected_result);
+}
+
/*
* Test that the registration of an HDMI connector with an HDMI
* connector type succeeds.
@@ -726,6 +784,8 @@ static struct kunit_case drmm_connector_hdmi_init_tests[] = {
KUNIT_CASE(drm_test_connector_hdmi_init_bpc_null),
KUNIT_CASE(drm_test_connector_hdmi_init_formats_empty),
KUNIT_CASE(drm_test_connector_hdmi_init_formats_no_rgb),
+ KUNIT_CASE_PARAM(drm_test_connector_hdmi_init_formats_yuv420_allowed,
+ drm_connector_hdmi_init_formats_yuv420_allowed_gen_params),
KUNIT_CASE(drm_test_connector_hdmi_init_null_ddc),
KUNIT_CASE(drm_test_connector_hdmi_init_null_product),
KUNIT_CASE(drm_test_connector_hdmi_init_null_vendor),
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 04a6b8cc62ac..3c0b7824c0be 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -320,8 +320,7 @@ static void kunit_action_drm_mode_destroy(void *ptr)
}
/**
- * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC
- for a KUnit test
+ * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
* @test: The test context object
* @dev: DRM device
* @video_code: CEA VIC of the mode
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 20bf33702c3c..da203045df9b 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -108,6 +108,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->bin_job = NULL;
status = IRQ_HANDLED;
}
@@ -118,6 +119,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->render_job = NULL;
status = IRQ_HANDLED;
}
@@ -128,6 +130,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
trace_v3d_csd_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->csd_job = NULL;
status = IRQ_HANDLED;
}
@@ -165,6 +168,7 @@ v3d_hub_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->tfu_job = NULL;
status = IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index a0e433fbcba6..9b5b8c1f063b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -228,7 +228,6 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_VRAM);
buf->places[0].lpfn = PFN_UP(bo->resource->size);
- buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
/* For some reason we didn't end up at the start of vram */
@@ -443,7 +442,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
if (params->pin)
ttm_bo_pin(&vmw_bo->tbo);
- ttm_bo_unreserve(&vmw_bo->tbo);
+ if (!params->keep_resv)
+ ttm_bo_unreserve(&vmw_bo->tbo);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 43b5439ec9f7..11e330c7c7f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -56,8 +56,9 @@ struct vmw_bo_params {
u32 domain;
u32 busy_domain;
enum ttm_bo_type bo_type;
- size_t size;
bool pin;
+ bool keep_resv;
+ size_t size;
struct dma_resv *resv;
struct sg_table *sg;
};
@@ -83,7 +84,6 @@ struct vmw_bo {
struct ttm_placement placement;
struct ttm_place places[5];
- struct ttm_place busy_places[5];
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2c46897876dd..b19a062592b0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -403,7 +403,8 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_kernel,
.size = PAGE_SIZE,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
/*
@@ -415,10 +416,6 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
- BUG_ON(ret != 0);
- vmw_bo_pin_reserved(vbo, true);
-
ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index b9857f37ca1a..ed5015ced392 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -206,6 +206,7 @@ struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
.bo_type = ttm_bo_type_sg,
.size = attach->dmabuf->size,
.pin = false,
+ .keep_resv = true,
.resv = attach->dmabuf->resv,
.sg = table,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8db38927729b..800a79e035ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -750,6 +750,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
struct vmw_bo *old_bo = NULL;
struct vmw_bo *new_bo = NULL;
+ struct ww_acquire_ctx ctx;
s32 hotspot_x, hotspot_y;
int ret;
@@ -769,9 +770,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
if (du->cursor_surface)
du->cursor_age = du->cursor_surface->snooper.age;
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
if (!vmw_user_object_is_null(&old_vps->uo)) {
old_bo = vmw_user_object_buffer(&old_vps->uo);
- ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
if (ret != 0)
return;
}
@@ -779,9 +782,14 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
if (!vmw_user_object_is_null(&vps->uo)) {
new_bo = vmw_user_object_buffer(&vps->uo);
if (old_bo != new_bo) {
- ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
- if (ret != 0)
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ if (old_bo) {
+ ttm_bo_unreserve(&old_bo->tbo);
+ ww_acquire_fini(&ctx);
+ }
return;
+ }
} else {
new_bo = NULL;
}
@@ -803,10 +811,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x, hotspot_y);
}
- if (old_bo)
- ttm_bo_unreserve(&old_bo->tbo);
if (new_bo)
ttm_bo_unreserve(&new_bo->tbo);
+ if (old_bo)
+ ttm_bo_unreserve(&old_bo->tbo);
+
+ ww_acquire_fini(&ctx);
du->cursor_x = new_state->crtc_x + du->set_gui_x;
du->cursor_y = new_state->crtc_y + du->set_gui_y;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index a01ca3226d0a..7fb1c88bcc47 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -896,7 +896,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
if (!vmw_shader_id_ok(user_key, shader_type))
@@ -906,10 +907,6 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto out;
- ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
- if (unlikely(ret != 0))
- goto no_reserve;
-
/* Map and copy shader bytecode. */
ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 621d98b376bb..5553892d7c3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -572,15 +572,14 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
.busy_domain = domain,
.bo_type = ttm_bo_type_kernel,
.size = bo_size,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
- BUG_ON(ret != 0);
ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
if (likely(ret == 0)) {
struct vmw_ttm_tt *vmw_tt =
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 3e0ae40ebbd2..812046f5d741 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -257,10 +257,9 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
* however seems quite fragile not to also restart the GT. Try
* to do that here by triggering a GT reset.
*/
- for_each_gt(__gt, xe, id) {
- xe_gt_reset_async(__gt);
- flush_work(&__gt->reset.worker);
- }
+ for_each_gt(__gt, xe, id)
+ xe_gt_reset(__gt);
+
if (err) {
KUNIT_FAIL(test, "restore kernel err=%pe\n",
ERR_PTR(err));
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 6f9b7a266b41..2698e433c6fa 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -162,8 +162,7 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);
- xe_gt_reset_async(gt);
- flush_work(&gt->reset.worker);
+ xe_gt_reset(gt);
kunit_info(test, "mocs_reset_test after reset\n");
if (flags & HAS_GLOBAL_MOCS)
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index d6744be01a68..94d468d01253 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -387,6 +387,10 @@ int xe_gt_init_early(struct xe_gt *gt)
xe_force_wake_init_gt(gt, gt_to_fw(gt));
spin_lock_init(&gt->global_invl_lock);
+ err = xe_gt_tlb_invalidation_init_early(gt);
+ if (err)
+ return err;
+
return 0;
}
@@ -588,10 +592,6 @@ int xe_gt_init(struct xe_gt *gt)
xe_hw_fence_irq_init(&gt->fence_irq[i]);
}
- err = xe_gt_tlb_invalidation_init(gt);
- if (err)
- return err;
-
err = xe_gt_pagefault_init(gt);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 82b9b7f82fca..eb2946963420 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -57,6 +57,31 @@ int xe_gt_sanitize_freq(struct xe_gt *gt);
void xe_gt_remove(struct xe_gt *gt);
/**
+ * xe_gt_wait_for_reset - wait for gt's async reset to finalize.
+ * @gt: GT structure
+ * Return:
+ * %true if it waited for the work to finish execution,
+ * %false if there was no reset scheduled or it had already completed.
+ */
+static inline bool xe_gt_wait_for_reset(struct xe_gt *gt)
+{
+ return flush_work(&gt->reset.worker);
+}
+
+/**
+ * xe_gt_reset - perform synchronous reset
+ * @gt: GT structure
+ * Return:
+ * %true if it waited for the reset to finish,
+ * %false if there was no scheduled reset.
+ */
+static inline bool xe_gt_reset(struct xe_gt *gt)
+{
+ xe_gt_reset_async(gt);
+ return xe_gt_wait_for_reset(gt);
+}
+
+/**
* xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
* first that matches the same reset domain as @class
* @gt: GT structure
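The two inline helpers added above simply package an existing pair of calls, kick the asynchronous reset and then flush its worker, into one synchronous entry point. A rough user-space analogy of that shape, using a pthread in place of the kernel workqueue (build with -pthread); the fake_gt structure and all names are invented for illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_gt {
	pthread_t worker;
	bool worker_queued;
};

static void *do_reset(void *arg)
{
	(void)arg;
	puts("reset worker ran");
	return NULL;
}

/* analogue of xe_gt_reset_async(): queue the work and return */
static void fake_reset_async(struct fake_gt *gt)
{
	if (pthread_create(&gt->worker, NULL, do_reset, gt) == 0)
		gt->worker_queued = true;
}

/* analogue of xe_gt_wait_for_reset(): wait only if work was queued */
static bool fake_wait_for_reset(struct fake_gt *gt)
{
	if (!gt->worker_queued)
		return false;
	pthread_join(gt->worker, NULL);
	gt->worker_queued = false;
	return true;
}

/* analogue of xe_gt_reset(): synchronous reset */
static bool fake_reset(struct fake_gt *gt)
{
	fake_reset_async(gt);
	return fake_wait_for_reset(gt);
}

int main(void)
{
	struct fake_gt gt = { .worker_queued = false };

	return fake_reset(&gt) ? 0 : 1;
}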
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
index b6adfb9f2030..50fffc9ebf62 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -150,7 +150,7 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
gt->ccs_mode = num_engines;
xe_gt_record_user_engines(gt);
- xe_gt_reset_async(gt);
+ xe_gt_reset(gt);
}
mutex_unlock(&xe->drm.filelist_mutex);
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 3e8c351a0eab..e7792858b1e4 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -132,11 +132,9 @@ static int force_reset(struct xe_gt *gt, struct drm_printer *p)
static int force_reset_sync(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset_async(gt);
+ xe_gt_reset(gt);
xe_pm_runtime_put(gt_to_xe(gt));
- flush_work(&gt->reset.worker);
-
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index fd80afeef56a..ffd3ba7f6656 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -122,10 +122,12 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
if (!xe_gt_is_media_type(gt))
gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE;
- for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
- if ((gt->info.engine_mask & BIT(i)))
- gtidle->powergate_enable |= (VDN_HCP_POWERGATE_ENABLE(j) |
- VDN_MFXVDENC_POWERGATE_ENABLE(j));
+ if (xe->info.platform != XE_DG1) {
+ for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
+ if ((gt->info.engine_mask & BIT(i)))
+ gtidle->powergate_enable |= (VDN_HCP_POWERGATE_ENABLE(j) |
+ VDN_MFXVDENC_POWERGATE_ENABLE(j));
+ }
}
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 5013d674e17d..d929a78c26a9 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -550,9 +550,9 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
* Returns true if the caller should steer to the @group/@instance values
* returned. Returns false if the caller need not perform any steering
*/
-static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
- struct xe_reg_mcr reg_mcr,
- u8 *group, u8 *instance)
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance)
{
const struct xe_reg reg = to_xe_reg(reg_mcr);
const struct xe_mmio_range *implicit_ranges;
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index c0cd36021c24..bc06520befab 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -26,6 +26,10 @@ void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
u32 value);
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance);
+
void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance);
u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance);
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 6146d1776bda..0a0af413770e 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -106,7 +106,7 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
}
/**
- * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
+ * xe_gt_tlb_invalidation_init_early - Initialize GT TLB invalidation state
* @gt: graphics tile
*
* Initialize GT TLB invalidation state, purely software initialization, should
@@ -114,7 +114,7 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
*
* Return: 0 on success, negative error code on error.
*/
-int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
+int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
{
gt->tlb_invalidation.seqno = 1;
INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index 00b1c6c01e8d..672acfcdf0d7 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -14,7 +14,8 @@ struct xe_gt;
struct xe_guc;
struct xe_vma;
-int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
+
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 4e746ae98888..dd7f9d0effa9 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -29,6 +29,7 @@
#include "xe_platform_types.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
+#include "xe_gt_mcr.h"
/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX 8
@@ -701,6 +702,20 @@ static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
.flags = reg.masked ? GUC_REGSET_MASKED : 0,
};
+ if (reg.mcr) {
+ struct xe_reg_mcr mcr_reg = XE_REG_MCR(reg.addr);
+ u8 group, instance;
+
+ bool steer = xe_gt_mcr_get_nonterminated_steering(ads_to_gt(ads), mcr_reg,
+ &group, &instance);
+
+ if (steer) {
+ entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, group);
+ entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, instance);
+ entry.flags |= GUC_REGSET_STEERING_NEEDED;
+ }
+ }
+
xe_map_memcpy_to(ads_to_xe(ads), regset_map, n_entry * sizeof(entry),
&entry, sizeof(entry));
}
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 1557acee3523..eeeb98f66982 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -419,7 +419,7 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
* Bspec: 72161
*/
const u8 mocs_write_idx = gt->mocs.uc_index;
- const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
+ const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
(GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
gt->mocs.wb_index : gt->mocs.uc_index;
u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 5cc0f6f9bc11..678fa40e4cea 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -2198,6 +2198,7 @@ static const struct xe_mmio_range xe2_oa_mux_regs[] = {
{ .start = 0x5194, .end = 0x5194 }, /* SYS_MEM_LAT_MEASURE_MERTF_GRP_3D */
{ .start = 0x8704, .end = 0x8704 }, /* LMEM_LAT_MEASURE_MCFG_GRP */
{ .start = 0xB1BC, .end = 0xB1BC }, /* L3_BANK_LAT_MEASURE_LBCF_GFX */
+ { .start = 0xD0E0, .end = 0xD0F4 }, /* VISACTL */
{ .start = 0xE18C, .end = 0xE18C }, /* SAMPLER_MODE */
{ .start = 0xE590, .end = 0xE590 }, /* TDL_LSC_LAT_MEASURE_TDL_GFX */
{ .start = 0x13000, .end = 0x137FC }, /* PES_0_PESL0 - PES_63_UPPER_PESL3 */
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 2f1c9d97ad21..3db1b9d09c32 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -682,7 +682,7 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
/* _PMD method is optional. */
res = read_domain_devices(resource);
- if (res != -ENODEV)
+ if (res && res != -ENODEV)
return res;
if (resource->caps.flags & POWER_METER_CAN_MEASURE) {
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
index 6bdd21aa005a..291d91f68646 100644
--- a/drivers/hwmon/drivetemp.c
+++ b/drivers/hwmon/drivetemp.c
@@ -165,6 +165,7 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
{
u8 scsi_cmd[MAX_COMMAND_SIZE];
enum req_op op;
+ int err;
memset(scsi_cmd, 0, sizeof(scsi_cmd));
scsi_cmd[0] = ATA_16;
@@ -192,8 +193,11 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
scsi_cmd[12] = lba_high;
scsi_cmd[14] = ata_command;
- return scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
- ATA_SECT_SIZE, HZ, 5, NULL);
+ err = scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
+ ATA_SECT_SIZE, 10 * HZ, 5, NULL);
+ if (err > 0)
+ err = -EIO;
+ return err;
}
static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature,
diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c
index 7ca139e4b6af..6d5d4cb846da 100644
--- a/drivers/hwmon/ltc2991.c
+++ b/drivers/hwmon/ltc2991.c
@@ -125,7 +125,7 @@ static int ltc2991_get_curr(struct ltc2991_state *st, u32 reg, int channel,
/* Vx-Vy, 19.075uV/LSB */
*val = DIV_ROUND_CLOSEST(sign_extend32(reg_val, 14) * 19075,
- st->r_sense_uohm[channel]);
+ (s32)st->r_sense_uohm[channel]);
return 0;
}
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
index 1c2cb12071b8..5acbfd7d088d 100644
--- a/drivers/hwmon/tmp513.c
+++ b/drivers/hwmon/tmp513.c
@@ -207,7 +207,8 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
*val = sign_extend32(regval,
reg == TMP51X_SHUNT_CURRENT_RESULT ?
16 - tmp51x_get_pga_shift(data) : 15);
- *val = DIV_ROUND_CLOSEST(*val * 10 * MILLI, data->shunt_uohms);
+ *val = DIV_ROUND_CLOSEST(*val * 10 * (long)MILLI, (long)data->shunt_uohms);
+
break;
case TMP51X_BUS_VOLTAGE_RESULT:
case TMP51X_BUS_VOLTAGE_H_LIMIT:
@@ -223,7 +224,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
case TMP51X_BUS_CURRENT_RESULT:
// Current = (ShuntVoltage * CalibrationRegister) / 4096
*val = sign_extend32(regval, 15) * (long)data->curr_lsb_ua;
- *val = DIV_ROUND_CLOSEST(*val, MILLI);
+ *val = DIV_ROUND_CLOSEST(*val, (long)MILLI);
break;
case TMP51X_LOCAL_TEMP_RESULT:
case TMP51X_REMOTE_TEMP_RESULT_1:
@@ -263,7 +264,7 @@ static int tmp51x_set_value(struct tmp51x_data *data, u8 reg, long val)
* The user enters a current value and we convert it to
* voltage. 1lsb = 10uV
*/
- val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10 * MILLI);
+ val = DIV_ROUND_CLOSEST(val * (long)data->shunt_uohms, 10 * (long)MILLI);
max_val = U16_MAX >> tmp51x_get_pga_shift(data);
regval = clamp_val(val, -max_val, max_val);
break;
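The ltc2991 and tmp513 hunks above fix the same C pitfall: when a signed reading is divided by a divisor held in an unsigned type of the same or greater rank, the usual arithmetic conversions turn the negative dividend into a huge unsigned value before the division happens. A minimal stand-alone demonstration, using plain division instead of the kernel's DIV_ROUND_CLOSEST and made-up numbers:

#include <stdio.h>

int main(void)
{
	int val = -19075;		/* e.g. a negative differential reading */
	unsigned int r_sense = 100;	/* divisor stored in an unsigned field */

	/* int / unsigned int: val is converted to unsigned before dividing */
	int wrong = val / r_sense;	/* ~42949482 on 32-bit int targets */
	/* casting the divisor, as the patches do, keeps the math signed */
	int right = val / (int)r_sense;	/* -190 */

	printf("wrong=%d right=%d\n", wrong, right);
	return 0;
}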
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index a7b77d14ee86..5693a38da7b5 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -130,6 +130,8 @@
#define ID_P_PM_BLOCKED BIT(31)
#define ID_P_MASK GENMASK(31, 27)
+#define ID_SLAVE_NACK BIT(0)
+
enum rcar_i2c_type {
I2C_RCAR_GEN1,
I2C_RCAR_GEN2,
@@ -166,6 +168,7 @@ struct rcar_i2c_priv {
int irq;
struct i2c_client *host_notify_client;
+ u8 slave_flags;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -655,6 +658,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
{
u32 ssr_raw, ssr_filtered;
u8 value;
+ int ret;
ssr_raw = rcar_i2c_read(priv, ICSSR) & 0xff;
ssr_filtered = ssr_raw & rcar_i2c_read(priv, ICSIER);
@@ -670,7 +674,10 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICRXTX, value);
rcar_i2c_write(priv, ICSIER, SDE | SSR | SAR);
} else {
- i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ if (ret)
+ priv->slave_flags |= ID_SLAVE_NACK;
+
rcar_i2c_read(priv, ICRXTX); /* dummy read */
rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
}
@@ -683,18 +690,21 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
if (ssr_filtered & SSR) {
i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */
+ priv->slave_flags &= ~ID_SLAVE_NACK;
rcar_i2c_write(priv, ICSIER, SAR);
rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
}
/* master wants to write to us */
if (ssr_filtered & SDR) {
- int ret;
-
value = rcar_i2c_read(priv, ICRXTX);
ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
- /* Send NACK in case of error */
- rcar_i2c_write(priv, ICSCR, SIE | SDBS | (ret < 0 ? FNA : 0));
+ if (ret)
+ priv->slave_flags |= ID_SLAVE_NACK;
+
+ /* Send NACK in case of error, but it will come 1 byte late :( */
+ rcar_i2c_write(priv, ICSCR, SIE | SDBS |
+ (priv->slave_flags & ID_SLAVE_NACK ? FNA : 0));
rcar_i2c_write(priv, ICSSR, ~SDR & 0xff);
}
diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
index b7c10ced5a43..8fe9ddff8e96 100644
--- a/drivers/i2c/i2c-atr.c
+++ b/drivers/i2c/i2c-atr.c
@@ -412,7 +412,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
dev_name(dev), ret);
break;
- case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_REMOVED_DEVICE:
i2c_atr_detach_client(client->adapter, client);
break;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 7c810893bfa3..75d30861ffe2 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1562,6 +1562,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
res = device_add(&adap->dev);
if (res) {
pr_err("adapter '%s': can't register device (%d)\n", adap->name, res);
+ put_device(&adap->dev);
goto out_list;
}
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index 0d6fbaa48248..6de4307050dd 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -38,6 +38,7 @@ enum testunit_regs {
enum testunit_flags {
TU_FLAG_IN_PROCESS,
+ TU_FLAG_NACK,
};
struct testunit_data {
@@ -90,8 +91,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
switch (event) {
case I2C_SLAVE_WRITE_REQUESTED:
- if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
- return -EBUSY;
+ if (test_bit(TU_FLAG_IN_PROCESS | TU_FLAG_NACK, &tu->flags)) {
+ ret = -EBUSY;
+ break;
+ }
memset(tu->regs, 0, TU_NUM_REGS);
tu->reg_idx = 0;
@@ -99,8 +102,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
break;
case I2C_SLAVE_WRITE_RECEIVED:
- if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
- return -EBUSY;
+ if (test_bit(TU_FLAG_IN_PROCESS | TU_FLAG_NACK, &tu->flags)) {
+ ret = -EBUSY;
+ break;
+ }
if (tu->reg_idx < TU_NUM_REGS)
tu->regs[tu->reg_idx] = *val;
@@ -129,6 +134,8 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
* here because we still need them in the workqueue!
*/
tu->reg_idx = 0;
+
+ clear_bit(TU_FLAG_NACK, &tu->flags);
break;
case I2C_SLAVE_READ_PROCESSED:
@@ -151,6 +158,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
break;
}
+ /* If an error occurred at some point, we NACK everything until the next STOP */
+ if (ret)
+ set_bit(TU_FLAG_NACK, &tu->flags);
+
return ret;
}
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index dce18f763a09..77a740561fd7 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -68,7 +68,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
}
/*
- * Check if there are pinctrl states at all. Note: we cant' use
+ * Check if there are pinctrl states at all. Note: we can't use
* devm_pinctrl_get_select() because we need to distinguish between
* the -ENODEV from devm_pinctrl_get() and pinctrl_lookup_state().
*/
@@ -261,7 +261,9 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
pm_runtime_no_callbacks(&pdev->dev);
/* switch to first parent as active master */
- i2c_demux_activate_master(priv, 0);
+ err = i2c_demux_activate_master(priv, 0);
+ if (err)
+ goto err_rollback;
err = device_create_file(&pdev->dev, &dev_attr_available_masters);
if (err)
diff --git a/drivers/iio/adc/ad4695.c b/drivers/iio/adc/ad4695.c
index 595ec4158e73..0146aed9069f 100644
--- a/drivers/iio/adc/ad4695.c
+++ b/drivers/iio/adc/ad4695.c
@@ -91,6 +91,7 @@
#define AD4695_T_WAKEUP_SW_MS 3
#define AD4695_T_REFBUF_MS 100
#define AD4695_T_REGCONFIG_NS 20
+#define AD4695_T_SCK_CNV_DELAY_NS 80
#define AD4695_REG_ACCESS_SCLK_HZ (10 * MEGA)
/* Max number of voltage input channels. */
@@ -132,8 +133,13 @@ struct ad4695_state {
unsigned int vref_mv;
/* Common mode input pin voltage. */
unsigned int com_mv;
- /* 1 per voltage and temperature chan plus 1 xfer to trigger 1st CNV */
- struct spi_transfer buf_read_xfer[AD4695_MAX_CHANNELS + 2];
+ /*
+ * 2 per voltage and temperature chan plus 1 xfer to trigger 1st
+ * CNV. Excluding the trigger xfer, every 2nd xfer only serves
+ * to control CS and add a delay between the last SCLK and next
+ * CNV rising edges.
+ */
+ struct spi_transfer buf_read_xfer[AD4695_MAX_CHANNELS * 2 + 3];
struct spi_message buf_read_msg;
/* Raw conversion data received. */
u8 buf[ALIGN((AD4695_MAX_CHANNELS + 2) * AD4695_MAX_CHANNEL_SIZE,
@@ -423,7 +429,7 @@ static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
u8 temp_chan_bit = st->chip_info->num_voltage_inputs;
u32 bit, num_xfer, num_slots;
u32 temp_en = 0;
- int ret;
+ int ret, rx_buf_offset = 0;
/*
* We are using the advanced sequencer since it is the only way to read
@@ -449,11 +455,9 @@ static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
iio_for_each_active_channel(indio_dev, bit) {
xfer = &st->buf_read_xfer[num_xfer];
xfer->bits_per_word = 16;
- xfer->rx_buf = &st->buf[(num_xfer - 1) * 2];
+ xfer->rx_buf = &st->buf[rx_buf_offset];
xfer->len = 2;
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = AD4695_T_CONVERT_NS;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ rx_buf_offset += xfer->len;
if (bit == temp_chan_bit) {
temp_en = 1;
@@ -468,21 +472,44 @@ static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
}
num_xfer++;
+
+ /*
+ * We need to add a blank xfer in data reads, to meet the timing
+ * requirement of a minimum delay between the last SCLK rising
+ * edge and the CS deassert.
+ */
+ xfer = &st->buf_read_xfer[num_xfer];
+ xfer->delay.value = AD4695_T_SCK_CNV_DELAY_NS;
+ xfer->delay.unit = SPI_DELAY_UNIT_NSECS;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = AD4695_T_CONVERT_NS;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ num_xfer++;
}
/*
* The advanced sequencer requires that at least 2 slots are enabled.
* Since slot 0 is always used for other purposes, we need only 1
- * enabled voltage channel to meet this requirement. If the temperature
- * channel is the only enabled channel, we need to add one more slot
- * in the sequence but not read from it.
+ * enabled voltage channel to meet this requirement. If the temperature
+ * channel is the only enabled channel, we need to add one more slot in
+ * the sequence but not read from it. This is because the temperature
+ * sensor is sampled at the end of the channel sequence in advanced
+ * sequencer mode (see datasheet page 38).
+ *
+ * From the iio_for_each_active_channel() block above, we now have an
+ * xfer with data followed by a blank xfer to allow us to meet the
+ * timing spec, so move both of those up before adding an extra to
+ * handle the temperature-only case.
*/
if (num_slots < 2) {
- /* move last xfer so we can insert one more xfer before it */
- st->buf_read_xfer[num_xfer] = *xfer;
+ /* Move last two xfers */
+ st->buf_read_xfer[num_xfer] = st->buf_read_xfer[num_xfer - 1];
+ st->buf_read_xfer[num_xfer - 1] = st->buf_read_xfer[num_xfer - 2];
num_xfer++;
- /* modify 2nd to last xfer for extra slot */
+ /* Modify inserted xfer for extra slot. */
+ xfer = &st->buf_read_xfer[num_xfer - 3];
memset(xfer, 0, sizeof(*xfer));
xfer->cs_change = 1;
xfer->delay.value = st->chip_info->t_acq_ns;
@@ -499,6 +526,12 @@ static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
return ret;
num_slots++;
+
+ /*
+ * We still want to point at the last xfer when finished, so
+ * update the pointer.
+ */
+ xfer = &st->buf_read_xfer[num_xfer - 1];
}
/*
@@ -583,8 +616,20 @@ out:
*/
static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address)
{
- struct spi_transfer xfer[2] = { };
- int ret, i = 0;
+ struct spi_transfer xfers[2] = {
+ {
+ .speed_hz = AD4695_REG_ACCESS_SCLK_HZ,
+ .bits_per_word = 16,
+ .tx_buf = &st->cnv_cmd,
+ .len = 2,
+ },
+ {
+ /* Required delay between last SCLK and CNV/CS */
+ .delay.value = AD4695_T_SCK_CNV_DELAY_NS,
+ .delay.unit = SPI_DELAY_UNIT_NSECS,
+ }
+ };
+ int ret;
ret = ad4695_set_single_cycle_mode(st, address);
if (ret)
@@ -592,29 +637,22 @@ static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address)
/*
* Setting the first channel to the temperature channel isn't supported
- * in single-cycle mode, so we have to do an extra xfer to read the
- * temperature.
+ * in single-cycle mode, so we have to do an extra conversion to read
+ * the temperature.
*/
if (address == AD4695_CMD_TEMP_CHAN) {
- /* We aren't reading, so we can make this a short xfer. */
- st->cnv_cmd2 = AD4695_CMD_TEMP_CHAN << 3;
- xfer[0].tx_buf = &st->cnv_cmd2;
- xfer[0].len = 1;
- xfer[0].cs_change = 1;
- xfer[0].cs_change_delay.value = AD4695_T_CONVERT_NS;
- xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
-
- i = 1;
+ st->cnv_cmd = AD4695_CMD_TEMP_CHAN << 11;
+
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
+ if (ret)
+ return ret;
}
/* Then read the result and exit conversion mode. */
st->cnv_cmd = AD4695_CMD_EXIT_CNV_MODE << 11;
- xfer[i].bits_per_word = 16;
- xfer[i].tx_buf = &st->cnv_cmd;
- xfer[i].rx_buf = &st->raw_data;
- xfer[i].len = 2;
+ xfers[0].rx_buf = &st->raw_data;
- return spi_sync_transfer(st->spi, xfer, i + 1);
+ return spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
}
static int ad4695_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 7314fb32bdec..3d678c420cbf 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -917,6 +917,9 @@ static int ad7124_setup(struct ad7124_state *st)
* set all channels to this default value.
*/
ad7124_set_channel_odr(st, i, 10);
+
+ /* Disable all channels to prevent unintended conversions. */
+ ad_sd_write_reg(&st->sd, AD7124_CHANNEL(i), 2, 0);
}
ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control);
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index 8a0c931ca83a..8b03c1e5567e 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -200,6 +200,7 @@ struct ad7173_channel {
struct ad7173_state {
struct ad_sigma_delta sd;
+ struct ad_sigma_delta_info sigma_delta_info;
const struct ad7173_device_info *info;
struct ad7173_channel *channels;
struct regulator_bulk_data regulators[3];
@@ -753,7 +754,7 @@ static int ad7173_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
return ad_sd_write_reg(sd, AD7173_REG_CH(chan), 2, 0);
}
-static struct ad_sigma_delta_info ad7173_sigma_delta_info = {
+static const struct ad_sigma_delta_info ad7173_sigma_delta_info = {
.set_channel = ad7173_set_channel,
.append_status = ad7173_append_status,
.disable_all = ad7173_disable_all,
@@ -1403,7 +1404,7 @@ static int ad7173_fw_parse_device_config(struct iio_dev *indio_dev)
if (ret < 0)
return dev_err_probe(dev, ret, "Interrupt 'rdy' is required\n");
- ad7173_sigma_delta_info.irq_line = ret;
+ st->sigma_delta_info.irq_line = ret;
return ad7173_fw_parse_channel_config(indio_dev);
}
@@ -1436,8 +1437,9 @@ static int ad7173_probe(struct spi_device *spi)
spi->mode = SPI_MODE_3;
spi_setup(spi);
- ad7173_sigma_delta_info.num_slots = st->info->num_configs;
- ret = ad_sd_init(&st->sd, indio_dev, spi, &ad7173_sigma_delta_info);
+ st->sigma_delta_info = ad7173_sigma_delta_info;
+ st->sigma_delta_info.num_slots = st->info->num_configs;
+ ret = ad_sd_init(&st->sd, indio_dev, spi, &st->sigma_delta_info);
if (ret)
return ret;
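The ad7173 change above follows a common driver pattern: keep the shared info table const, copy it into per-instance state during probe, and only then patch in device-specific fields (here the IRQ line and slot count), so two probed devices can no longer overwrite each other's settings through a shared static. A small stand-alone sketch of the idea; the structure and field names are invented:

#include <stdio.h>

struct ops_info {
	int irq_line;
	int num_slots;
};

/* shared, immutable template (analogue of ad7173_sigma_delta_info) */
static const struct ops_info template_info = {
	.irq_line = -1,
	.num_slots = 1,
};

struct device_state {
	struct ops_info info;	/* per-instance copy */
};

static void probe_one(struct device_state *st, int irq, int slots)
{
	st->info = template_info;	/* start from the const template */
	st->info.irq_line = irq;	/* then patch in instance data */
	st->info.num_slots = slots;
}

int main(void)
{
	struct device_state a, b;

	probe_one(&a, 10, 4);
	probe_one(&b, 11, 8);
	printf("a: irq=%d slots=%d  b: irq=%d slots=%d\n",
	       a.info.irq_line, a.info.num_slots,
	       b.info.irq_line, b.info.num_slots);
	return 0;
}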
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index d358958ab310..f30119b42ba0 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -895,7 +895,7 @@ static int ad9467_update_scan_mode(struct iio_dev *indio_dev,
return 0;
}
-static struct iio_info ad9467_info = {
+static const struct iio_info ad9467_info = {
.read_raw = ad9467_read_raw,
.write_raw = ad9467_write_raw,
.update_scan_mode = ad9467_update_scan_mode,
@@ -903,6 +903,14 @@ static struct iio_info ad9467_info = {
.read_avail = ad9467_read_avail,
};
+/* Same as above, but without .read_avail */
+static const struct iio_info ad9467_info_no_read_avail = {
+ .read_raw = ad9467_read_raw,
+ .write_raw = ad9467_write_raw,
+ .update_scan_mode = ad9467_update_scan_mode,
+ .debugfs_reg_access = ad9467_reg_access,
+};
+
static int ad9467_scale_fill(struct ad9467_state *st)
{
const struct ad9467_chip_info *info = st->info;
@@ -1214,11 +1222,12 @@ static int ad9467_probe(struct spi_device *spi)
}
if (st->info->num_scales > 1)
- ad9467_info.read_avail = ad9467_read_avail;
+ indio_dev->info = &ad9467_info;
+ else
+ indio_dev->info = &ad9467_info_no_read_avail;
indio_dev->name = st->info->name;
indio_dev->channels = st->info->channels;
indio_dev->num_channels = st->info->num_channels;
- indio_dev->info = &ad9467_info;
ret = ad9467_iio_backend_get(st);
if (ret)
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index a3f0a2321666..5927756b749a 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -979,7 +979,7 @@ static int at91_ts_register(struct iio_dev *idev,
return ret;
err:
- input_free_device(st->ts_input);
+ input_free_device(input);
return ret;
}
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 240cfa391674..dfd47a6e1f4a 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -368,6 +368,8 @@ static irqreturn_t rockchip_saradc_trigger_handler(int irq, void *p)
int ret;
int i, j = 0;
+ memset(&data, 0, sizeof(data));
+
mutex_lock(&info->lock);
iio_for_each_active_channel(i_dev, i) {
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 1f9eca2fb2bf..fe11b0d8eab3 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -691,11 +691,14 @@ static int stm32_dfsdm_generic_channel_parse_of(struct stm32_dfsdm *dfsdm,
return -EINVAL;
}
- ret = fwnode_property_read_string(node, "label", &ch->datasheet_name);
- if (ret < 0) {
- dev_err(&indio_dev->dev,
- " Error parsing 'label' for idx %d\n", ch->channel);
- return ret;
+ if (fwnode_property_present(node, "label")) {
+ /* label is optional */
+ ret = fwnode_property_read_string(node, "label", &ch->datasheet_name);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ " Error parsing 'label' for idx %d\n", ch->channel);
+ return ret;
+ }
}
df_ch = &dfsdm->ch_list[ch->channel];
diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c
index e9d9d4d46d38..c268e27eec12 100644
--- a/drivers/iio/adc/ti-ads1119.c
+++ b/drivers/iio/adc/ti-ads1119.c
@@ -500,12 +500,14 @@ static irqreturn_t ads1119_trigger_handler(int irq, void *private)
struct iio_dev *indio_dev = pf->indio_dev;
struct ads1119_state *st = iio_priv(indio_dev);
struct {
- unsigned int sample;
+ s16 sample;
s64 timestamp __aligned(8);
} scan;
unsigned int index;
int ret;
+ memset(&scan, 0, sizeof(scan));
+
if (!iio_trigger_using_own(indio_dev)) {
index = find_first_bit(indio_dev->active_scan_mask,
iio_get_masklength(indio_dev));
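Several of the IIO fixes in this series (rockchip_saradc, ti-ads1119, kmx61, bh1745, vcnl4035, zpa2326, tmp006, ads8688 and the dummy buffer driver) address the same issue: the scan struct or buffer pushed to user space contains alignment padding or unused tail bytes, and unless it is zeroed first, uninitialized kernel memory can leak out. A user-space illustration of where the padding comes from; the struct mirrors the ads1119 scan layout and the printed offsets assume a typical LP64 ABI:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct scan {
	int16_t sample;
	int64_t timestamp __attribute__((aligned(8)));
};

int main(void)
{
	struct scan s;

	/* six padding bytes sit between the two members on LP64 */
	printf("sizeof=%zu sample@%zu timestamp@%zu\n",
	       sizeof(s), offsetof(struct scan, sample),
	       offsetof(struct scan, timestamp));

	/* zeroing the whole struct is what the driver fixes add */
	memset(&s, 0, sizeof(s));
	s.sample = 1234;
	s.timestamp = 5678;
	return 0;
}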
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index 425b48d8986f..f452f57f11c9 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -183,9 +183,9 @@ static int ads124s_reset(struct iio_dev *indio_dev)
struct ads124s_private *priv = iio_priv(indio_dev);
if (priv->reset_gpio) {
- gpiod_set_value(priv->reset_gpio, 0);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
udelay(200);
- gpiod_set_value(priv->reset_gpio, 1);
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
} else {
return ads124s_write_cmd(indio_dev, ADS124S08_CMD_RESET);
}
diff --git a/drivers/iio/adc/ti-ads1298.c b/drivers/iio/adc/ti-ads1298.c
index 36d43495f603..03f762415fa5 100644
--- a/drivers/iio/adc/ti-ads1298.c
+++ b/drivers/iio/adc/ti-ads1298.c
@@ -613,6 +613,8 @@ static int ads1298_init(struct iio_dev *indio_dev)
}
indio_dev->name = devm_kasprintf(dev, GFP_KERNEL, "ads129%u%s",
indio_dev->num_channels, suffix);
+ if (!indio_dev->name)
+ return -ENOMEM;
/* Enable internal test signal, double amplitude, double frequency */
ret = regmap_write(priv->regmap, ADS1298_REG_CONFIG2,
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 9b1814f1965a..a31658b760a4 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -381,7 +381,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
/* Ensure naturally aligned timestamp */
- u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8);
+ u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8) = { };
int i, j = 0;
iio_for_each_active_channel(indio_dev, i) {
diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c
index 4ca3f1aaff99..288880346707 100644
--- a/drivers/iio/dummy/iio_simple_dummy_buffer.c
+++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c
@@ -48,7 +48,7 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
int i = 0, j;
u16 *data;
- data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ data = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
if (!data)
goto done;
diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
index 0391c78c2f18..754c8a564ba4 100644
--- a/drivers/iio/gyro/fxas21002c_core.c
+++ b/drivers/iio/gyro/fxas21002c_core.c
@@ -730,14 +730,21 @@ static irqreturn_t fxas21002c_trigger_handler(int irq, void *p)
int ret;
mutex_lock(&data->lock);
+ ret = fxas21002c_pm_get(data);
+ if (ret < 0)
+ goto out_unlock;
+
ret = regmap_bulk_read(data->regmap, FXAS21002C_REG_OUT_X_MSB,
data->buffer, CHANNEL_SCAN_MAX * sizeof(s16));
if (ret < 0)
- goto out_unlock;
+ goto out_pm_put;
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
data->timestamp);
+out_pm_put:
+ fxas21002c_pm_put(data);
+
out_unlock:
mutex_unlock(&data->lock);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
index 3a07e43e4cf1..18787a43477b 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
@@ -403,6 +403,7 @@ struct inv_icm42600_sensor_state {
typedef int (*inv_icm42600_bus_setup)(struct inv_icm42600_state *);
extern const struct regmap_config inv_icm42600_regmap_config;
+extern const struct regmap_config inv_icm42600_spi_regmap_config;
extern const struct dev_pm_ops inv_icm42600_pm_ops;
const struct iio_mount_matrix *
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index 561d245c1d64..ef9875d3b79d 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -87,6 +87,21 @@ const struct regmap_config inv_icm42600_regmap_config = {
};
EXPORT_SYMBOL_NS_GPL(inv_icm42600_regmap_config, "IIO_ICM42600");
+/* define specific regmap for SPI not supporting burst write */
+const struct regmap_config inv_icm42600_spi_regmap_config = {
+ .name = "inv_icm42600",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x4FFF,
+ .ranges = inv_icm42600_regmap_ranges,
+ .num_ranges = ARRAY_SIZE(inv_icm42600_regmap_ranges),
+ .volatile_table = inv_icm42600_regmap_volatile_accesses,
+ .rd_noinc_table = inv_icm42600_regmap_rd_noinc_accesses,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_write = true,
+};
+EXPORT_SYMBOL_NS_GPL(inv_icm42600_spi_regmap_config, "IIO_ICM42600");
+
struct inv_icm42600_hw {
uint8_t whoami;
const char *name;
@@ -814,6 +829,8 @@ out_unlock:
static int inv_icm42600_resume(struct device *dev)
{
struct inv_icm42600_state *st = dev_get_drvdata(dev);
+ struct inv_icm42600_sensor_state *gyro_st = iio_priv(st->indio_gyro);
+ struct inv_icm42600_sensor_state *accel_st = iio_priv(st->indio_accel);
int ret;
mutex_lock(&st->lock);
@@ -834,9 +851,12 @@ static int inv_icm42600_resume(struct device *dev)
goto out_unlock;
/* restore FIFO data streaming */
- if (st->fifo.on)
+ if (st->fifo.on) {
+ inv_sensors_timestamp_reset(&gyro_st->ts);
+ inv_sensors_timestamp_reset(&accel_st->ts);
ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
INV_ICM42600_FIFO_CONFIG_STREAM);
+ }
out_unlock:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
index c55d8e672183..2bd2c4c8e50c 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
@@ -59,7 +59,8 @@ static int inv_icm42600_probe(struct spi_device *spi)
return -EINVAL;
chip = (uintptr_t)match;
- regmap = devm_regmap_init_spi(spi, &inv_icm42600_regmap_config);
+ /* use SPI specific regmap */
+ regmap = devm_regmap_init_spi(spi, &inv_icm42600_spi_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 324c38764656..e19c5d3137c6 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1193,7 +1193,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p)
struct kmx61_data *data = kmx61_get_data(indio_dev);
int bit, ret, i = 0;
u8 base;
- s16 buffer[8];
+ s16 buffer[8] = { };
if (indio_dev == data->acc_indio_dev)
base = KMX61_ACC_XOUT_L;
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 136b225b6bc8..9050a59129e6 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -500,7 +500,7 @@ struct iio_channel *iio_channel_get_all(struct device *dev)
return_ptr(chans);
error_free_chans:
- for (i = 0; i < nummaps; i++)
+ for (i = 0; i < mapind; i++)
iio_device_put(chans[i].indio_dev);
return ERR_PTR(ret);
}
diff --git a/drivers/iio/light/bh1745.c b/drivers/iio/light/bh1745.c
index 56e32689bb97..63bf729df517 100644
--- a/drivers/iio/light/bh1745.c
+++ b/drivers/iio/light/bh1745.c
@@ -746,6 +746,8 @@ static irqreturn_t bh1745_trigger_handler(int interrupt, void *p)
int i;
int j = 0;
+ memset(&scan, 0, sizeof(scan));
+
iio_for_each_active_channel(indio_dev, i) {
ret = regmap_bulk_read(data->regmap, BH1745_RED_LSB + 2 * i,
&value, 2);
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index 337a1332c2c6..67c94be02018 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -105,7 +105,7 @@ static irqreturn_t vcnl4035_trigger_consumer_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct vcnl4035_data *data = iio_priv(indio_dev);
/* Ensure naturally aligned timestamp */
- u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)] __aligned(8);
+ u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)] __aligned(8) = { };
int ret;
ret = regmap_read(data->regmap, VCNL4035_ALS_DATA, (int *)buffer);
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 2adea84f5b4d..9db1c94dfc18 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -586,6 +586,8 @@ static int zpa2326_fill_sample_buffer(struct iio_dev *indio_dev,
} sample;
int err;
+ memset(&sample, 0, sizeof(sample));
+
if (test_bit(0, indio_dev->active_scan_mask)) {
/* Get current pressure from hardware FIFO. */
err = zpa2326_dequeue_pressure(indio_dev, &sample.pressure);
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index 0c844137d7aa..02b27f471baa 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -252,6 +252,8 @@ static irqreturn_t tmp006_trigger_handler(int irq, void *p)
} scan;
s32 ret;
+ memset(&scan, 0, sizeof(scan));
+
ret = i2c_smbus_read_word_data(data->client, TMP006_VOBJECT);
if (ret < 0)
goto err;
diff --git a/drivers/iio/test/Kconfig b/drivers/iio/test/Kconfig
index 33cca49c8058..7a181cac3cc9 100644
--- a/drivers/iio/test/Kconfig
+++ b/drivers/iio/test/Kconfig
@@ -5,7 +5,7 @@
# Keep in alphabetical order
config IIO_GTS_KUNIT_TEST
- tristate "Test IIO formatting functions" if !KUNIT_ALL_TESTS
+ tristate "Test IIO gain-time-scale helpers" if !KUNIT_ALL_TESTS
depends on KUNIT
select IIO_GTS_HELPER
select TEST_KUNIT_DEVICE_HELPERS
diff --git a/drivers/iio/test/iio-test-rescale.c b/drivers/iio/test/iio-test-rescale.c
index cbf13337ed1f..bbc6a2e1c2c1 100644
--- a/drivers/iio/test/iio-test-rescale.c
+++ b/drivers/iio/test/iio-test-rescale.c
@@ -652,6 +652,8 @@ static void iio_rescale_test_scale(struct kunit *test)
int rel_ppm;
int ret;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buff);
+
rescale.numerator = t->numerator;
rescale.denominator = t->denominator;
rescale.offset = t->offset;
@@ -681,6 +683,8 @@ static void iio_rescale_test_offset(struct kunit *test)
int values[2];
int ret;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buff_off);
+
rescale.numerator = t->numerator;
rescale.denominator = t->denominator;
rescale.offset = t->offset;
diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c
index b956e4050f38..88f311c11020 100644
--- a/drivers/interconnect/icc-clk.c
+++ b/drivers/interconnect/icc-clk.c
@@ -116,6 +116,11 @@ struct icc_provider *icc_clk_register(struct device *dev,
}
node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_master", data[i].name);
+ if (!node->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
node->data = &qp->clocks[i];
icc_node_add(node, provider);
/* link to the next node, slave */
@@ -129,6 +134,11 @@ struct icc_provider *icc_clk_register(struct device *dev,
}
node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_slave", data[i].name);
+ if (!node->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
/* no data for slave node */
icc_node_add(node, provider);
onecell->nodes[j++] = node;
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index a8ed435f696c..ea1042d38128 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -503,6 +503,7 @@ int qnoc_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->num_nodes = num_nodes;
qp->num_intf_clks = cd_num;
for (i = 0; i < cd_num; i++)
@@ -597,7 +598,6 @@ regmap_done:
data->nodes[i] = node;
}
- data->num_nodes = num_nodes;
clk_bulk_disable_unprepare(qp->num_intf_clks, qp->intf_clks);
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index ec5db1478b2f..18ae45dcbfb2 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -442,7 +442,7 @@ static int ebs_iterate_devices(struct dm_target *ti,
static struct target_type ebs_target = {
.name = "ebs",
.version = {1, 0, 1},
- .features = DM_TARGET_PASSES_INTEGRITY,
+ .features = 0,
.module = THIS_MODULE,
.ctr = ebs_ctr,
.dtr = ebs_dtr,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index bf0f9dddd146..05cf4e3f2bbe 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2332,10 +2332,9 @@ static struct thin_c *get_first_thin(struct pool *pool)
struct thin_c *tc = NULL;
rcu_read_lock();
- if (!list_empty(&pool->active_thins)) {
- tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
+ tc = list_first_or_null_rcu(&pool->active_thins, struct thin_c, list);
+ if (tc)
thin_get(tc);
- }
rcu_read_unlock();
return tc;
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 62b1a44b8dd2..e61855da6461 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -40,35 +40,23 @@ static inline u64 fec_interleave(struct dm_verity *v, u64 offset)
}
/*
- * Decode an RS block using Reed-Solomon.
- */
-static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
- u8 *data, u8 *fec, int neras)
-{
- int i;
- uint16_t par[DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN];
-
- for (i = 0; i < v->fec->roots; i++)
- par[i] = fec[i];
-
- return decode_rs8(fio->rs, data, par, v->fec->rsn, NULL, neras,
- fio->erasures, 0, NULL);
-}
-
-/*
* Read error-correcting codes for the requested RS block. Returns a pointer
* to the data block. Caller is responsible for releasing buf.
*/
static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
- unsigned int *offset, struct dm_buffer **buf,
- unsigned short ioprio)
+ unsigned int *offset, unsigned int par_buf_offset,
+ struct dm_buffer **buf, unsigned short ioprio)
{
u64 position, block, rem;
u8 *res;
+ /* We have already read part of the parity bytes, skip to the next block */
+ if (par_buf_offset)
+ index++;
+
position = (index + rsb) * v->fec->roots;
block = div64_u64_rem(position, v->fec->io_size, &rem);
- *offset = (unsigned int)rem;
+ *offset = par_buf_offset ? 0 : (unsigned int)rem;
res = dm_bufio_read_with_ioprio(v->fec->bufio, block, buf, ioprio);
if (IS_ERR(res)) {
@@ -128,11 +116,13 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
{
int r, corrected = 0, res;
struct dm_buffer *buf;
- unsigned int n, i, offset;
+ unsigned int n, i, j, offset, par_buf_offset = 0;
+ uint16_t par_buf[DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN];
u8 *par, *block;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
- par = fec_read_parity(v, rsb, block_offset, &offset, &buf, bio_prio(bio));
+ par = fec_read_parity(v, rsb, block_offset, &offset,
+ par_buf_offset, &buf, bio_prio(bio));
if (IS_ERR(par))
return PTR_ERR(par);
@@ -142,7 +132,11 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
*/
fec_for_each_buffer_rs_block(fio, n, i) {
block = fec_buffer_rs_block(v, fio, n, i);
- res = fec_decode_rs8(v, fio, block, &par[offset], neras);
+ for (j = 0; j < v->fec->roots - par_buf_offset; j++)
+ par_buf[par_buf_offset + j] = par[offset + j];
+ /* Decode an RS block using Reed-Solomon */
+ res = decode_rs8(fio->rs, block, par_buf, v->fec->rsn,
+ NULL, neras, fio->erasures, 0, NULL);
if (res < 0) {
r = res;
goto error;
@@ -155,12 +149,22 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
if (block_offset >= 1 << v->data_dev_block_bits)
goto done;
- /* read the next block when we run out of parity bytes */
- offset += v->fec->roots;
+ /* Read the next block when we run out of parity bytes */
+ offset += (v->fec->roots - par_buf_offset);
+ /* Check if parity bytes are split between blocks */
+ if (offset < v->fec->io_size && (offset + v->fec->roots) > v->fec->io_size) {
+ par_buf_offset = v->fec->io_size - offset;
+ for (j = 0; j < par_buf_offset; j++)
+ par_buf[j] = par[offset + j];
+ offset += par_buf_offset;
+ } else
+ par_buf_offset = 0;
+
if (offset >= v->fec->io_size) {
dm_bufio_release(buf);
- par = fec_read_parity(v, rsb, block_offset, &offset, &buf, bio_prio(bio));
+ par = fec_read_parity(v, rsb, block_offset, &offset,
+ par_buf_offset, &buf, bio_prio(bio));
if (IS_ERR(par))
return PTR_ERR(par);
}
@@ -724,10 +728,7 @@ int verity_fec_ctr(struct dm_verity *v)
return -E2BIG;
}
- if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
- f->io_size = 1 << v->data_dev_block_bits;
- else
- f->io_size = v->fec->roots << SECTOR_SHIFT;
+ f->io_size = 1 << v->data_dev_block_bits;
f->bufio = dm_bufio_client_create(f->dev->bdev,
f->io_size,
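
For reference, the stitching introduced above copies whatever parity bytes remain at the tail of the current buffer into a local array, then resumes from offset 0 of the next buffer. A minimal standalone sketch of that bookkeeping, using illustrative names rather than the dm-verity internals:

/* Sketch: save parity bytes that straddle an io_size boundary. */
#include <stdint.h>
#include <string.h>

struct parity_cursor {
	unsigned int offset;   /* read position inside the current buffer */
	unsigned int io_size;  /* size of each parity buffer */
	unsigned int roots;    /* parity bytes needed per RS block */
};

/* Returns how many bytes were saved into par_buf before the buffer ran out. */
static unsigned int save_tail(struct parity_cursor *c, const uint8_t *buf,
			      uint8_t *par_buf)
{
	unsigned int tail = 0;

	/* Parity for the next block starts in this buffer but does not fit. */
	if (c->offset < c->io_size && c->offset + c->roots > c->io_size) {
		tail = c->io_size - c->offset;
		memcpy(par_buf, buf + c->offset, tail);
		c->offset += tail;
	}
	return tail;
}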
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index 157c9bd2fed7..8f8792e55806 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -917,23 +917,27 @@ static int load_ablock(struct dm_array_cursor *c)
if (c->block)
unlock_ablock(c->info, c->block);
- c->block = NULL;
- c->ab = NULL;
c->index = 0;
r = dm_btree_cursor_get_value(&c->cursor, &key, &value_le);
if (r) {
DMERR("dm_btree_cursor_get_value failed");
- dm_btree_cursor_end(&c->cursor);
+ goto out;
} else {
r = get_ablock(c->info, le64_to_cpu(value_le), &c->block, &c->ab);
if (r) {
DMERR("get_ablock failed");
- dm_btree_cursor_end(&c->cursor);
+ goto out;
}
}
+ return 0;
+
+out:
+ dm_btree_cursor_end(&c->cursor);
+ c->block = NULL;
+ c->ab = NULL;
return r;
}
@@ -956,10 +960,10 @@ EXPORT_SYMBOL_GPL(dm_array_cursor_begin);
void dm_array_cursor_end(struct dm_array_cursor *c)
{
- if (c->block) {
+ if (c->block)
unlock_ablock(c->info, c->block);
- dm_btree_cursor_end(&c->cursor);
- }
+
+ dm_btree_cursor_end(&c->cursor);
}
EXPORT_SYMBOL_GPL(dm_array_cursor_end);
@@ -999,6 +1003,7 @@ int dm_array_cursor_skip(struct dm_array_cursor *c, uint32_t count)
}
count -= remaining;
+ c->index += (remaining - 1);
r = dm_array_cursor_next(c);
} while (!r);
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
index e616e3ec2b42..3c1359d8d4e6 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
@@ -148,7 +148,7 @@ static int pci1xxxx_gpio_set_config(struct gpio_chip *gpio, unsigned int offset,
pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), true);
break;
default:
- ret = -EOPNOTSUPP;
+ ret = -ENOTSUPP;
break;
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -277,7 +277,7 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank));
spin_unlock_irqrestore(&priv->lock, flags);
irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32)));
- generic_handle_irq(irq);
+ handle_nested_irq(irq);
}
}
spin_lock_irqsave(&priv->lock, flags);
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 66949d9f0cc5..b6f374ded390 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -89,7 +89,7 @@ void spi_nor_spimem_setup_op(const struct spi_nor *nor,
op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
if (op->dummy.nbytes)
- op->dummy.buswidth = spi_nor_get_protocol_data_nbits(proto);
+ op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
if (op->data.nbytes)
op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index 2681889162a2..44971e71991f 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -118,7 +118,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (err && err != -EIO)
return err;
- listlen = fw_list.num_fw_slots;
+ listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
for (i = 0; i < listlen; i++) {
if (i < ARRAY_SIZE(fw_slotnames))
strscpy(buf, fw_slotnames[i], sizeof(buf));
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 6a716337f48b..268399dfcf22 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -923,7 +923,6 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -945,14 +944,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
phy_write(phy_data->phydev, 0x04, 0x0d01);
phy_write(phy_data->phydev, 0x00, 0x9140);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
-
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
@@ -964,7 +956,6 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -1028,13 +1019,7 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
reg = phy_read(phy_data->phydev, 0x00);
phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b86f980fa7ea..b6f844cac80e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2897,6 +2897,13 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
return 0;
}
+static bool bnxt_vnic_is_active(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+
+ return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
+}
+
static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
struct bnxt_napi *bnapi = dev_instance;
@@ -3164,7 +3171,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
}
}
- if (bp->flags & BNXT_FLAG_DIM) {
+ if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
@@ -3295,7 +3302,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
poll_done:
cpr_rx = &cpr->cp_ring_arr[0];
if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
- (bp->flags & BNXT_FLAG_DIM)) {
+ (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
@@ -4701,7 +4708,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* Changing allocation mode of RX rings.
* TODO: Update when extending xdp_rxq_info to support allocation modes.
*/
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
struct net_device *dev = bp->dev;
@@ -4722,15 +4729,30 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
bp->rx_skb_func = bnxt_rx_page_skb;
}
bp->rx_dir = DMA_BIDIRECTIONAL;
- /* Disable LRO or GRO_HW */
- netdev_update_features(dev);
} else {
dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
bp->rx_dir = DMA_FROM_DEVICE;
bp->rx_skb_func = bnxt_rx_skb;
}
- return 0;
+}
+
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+ __bnxt_set_rx_skb_mode(bp, page_mode);
+
+ if (!page_mode) {
+ int rx, tx;
+
+ bnxt_get_max_rings(bp, &rx, &tx, true);
+ if (rx > 1) {
+ bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features |= NETIF_F_LRO;
+ }
+ }
+
+ /* Update LRO and GRO_HW availability */
+ netdev_update_features(bp->dev);
}
static void bnxt_free_vnic_attributes(struct bnxt *bp)
@@ -7266,6 +7288,26 @@ err_out:
return rc;
}
+static void bnxt_cancel_dim(struct bnxt *bp)
+{
+ int i;
+
+ /* DIM work is initialized in bnxt_enable_napi(). Proceed only
+ * if NAPI is enabled.
+ */
+ if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
+ return;
+
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_napi *bnapi = rxr->bnapi;
+
+ cancel_work_sync(&bnapi->cp_ring.dim.work);
+ }
+}
+
static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, int cmpl_ring_id)
@@ -7366,6 +7408,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+ bnxt_cancel_dim(bp);
for (i = 0; i < bp->rx_nr_rings; i++) {
bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
@@ -11309,8 +11352,6 @@ static void bnxt_disable_napi(struct bnxt *bp)
if (bnapi->in_reset)
cpr->sw_stats->rx.rx_resets++;
napi_disable(&bnapi->napi);
- if (bnapi->rx_ring)
- cancel_work_sync(&cpr->dim.work);
}
}
@@ -15572,8 +15613,10 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
bnxt_hwrm_vnic_update(bp, vnic,
VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
}
-
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
rxr = &bp->rx_ring[idx];
+ cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
rxr->rx_next_cons = 0;
@@ -16186,7 +16229,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (bp->max_fltr < BNXT_MAX_FLTR)
bp->max_fltr = BNXT_MAX_FLTR;
bnxt_init_l2_fltr_tbl(bp);
- bnxt_set_rx_skb_mode(bp, false);
+ __bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
bnxt_rdma_aux_device_init(bp);
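
The ordering these bnxt changes rely on is: quiesce the VNIC, wait until every in-flight NAPI poll has observed that, and only then cancel outstanding DIM work and free the RX rings. A compilable sketch of that sequence; every function here is a stand-in for the driver or kernel call named in its comment:

#include <stdio.h>

static void quiesce_vnic(void)         { puts("vnic marked inactive"); }
static void wait_for_pollers(void)     { puts("synchronize_net()"); }
static void cancel_deferred_work(void) { puts("cancel_work_sync(dim.work)"); }
static void free_rx_rings(void)        { puts("rings freed"); }

int main(void)
{
	quiesce_vnic();          /* 1. mark the VNIC inactive */
	wait_for_pollers();      /* 2. in-flight NAPI polls now observe it */
	cancel_deferred_work();  /* 3. no further DIM work can be queued */
	free_rx_rings();         /* 4. only then free the RX rings */
	return 0;
}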
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 7df7a2233307..f11ed59203d9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2846,7 +2846,7 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index b771c84cdd89..0ed26e3a28f4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -208,7 +208,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
if (rc)
- return rc;
+ goto drop_req;
hwrm_req_timeout(bp, req, fw_msg->timeout);
resp = hwrm_req_hold(bp, req);
@@ -220,6 +220,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
memcpy(fw_msg->resp, resp, resp_len);
}
+drop_req:
hwrm_req_drop(bp, req);
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index f88b641533fc..dc51dce209d5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -422,15 +422,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
bnxt_set_rx_skb_mode(bp, true);
xdp_features_set_redirect_target(dev, true);
} else {
- int rx, tx;
-
xdp_features_clear_redirect_target(dev);
bnxt_set_rx_skb_mode(bp, false);
- bnxt_get_max_rings(bp, &rx, &tx, true);
- if (rx > 1) {
- bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
- bp->dev->hw_features |= NETIF_F_LRO;
- }
}
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index bc3af0054406..604dcfd49aa4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1799,7 +1799,10 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
struct adapter *adap = container_of(t, struct adapter, tids);
struct sk_buff *skb;
- WARN_ON(tid_out_of_range(&adap->tids, tid));
+ if (tid_out_of_range(&adap->tids, tid)) {
+ dev_err(adap->pdev_dev, "tid %d out of range\n", tid);
+ return;
+ }
if (t->tid_tab[tid - adap->tids.tid_base]) {
t->tid_tab[tid - adap->tids.tid_base] = NULL;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1b55047c0237..4566848e1d7c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1591,19 +1591,22 @@ static void fec_enet_tx(struct net_device *ndev, int budget)
fec_enet_tx_queue(ndev, i, budget);
}
-static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
struct bufdesc *bdp, int index)
{
struct page *new_page;
dma_addr_t phys_addr;
new_page = page_pool_dev_alloc_pages(rxq->page_pool);
- WARN_ON(!new_page);
- rxq->rx_skb_info[index].page = new_page;
+ if (unlikely(!new_page))
+ return -ENOMEM;
+ rxq->rx_skb_info[index].page = new_page;
rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+
+ return 0;
}
static u32
@@ -1698,6 +1701,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
int cpu = smp_processor_id();
struct xdp_buff xdp;
struct page *page;
+ __fec32 cbd_bufaddr;
u32 sub_len = 4;
#if !defined(CONFIG_M5272)
@@ -1766,12 +1770,17 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
index = fec_enet_get_bd_index(bdp, &rxq->bd);
page = rxq->rx_skb_info[index].page;
+ cbd_bufaddr = bdp->cbd_bufaddr;
+ if (fec_enet_update_cbd(rxq, bdp, index)) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+
dma_sync_single_for_cpu(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
+ fec32_to_cpu(cbd_bufaddr),
pkt_len,
DMA_FROM_DEVICE);
prefetch(page_address(page));
- fec_enet_update_cbd(rxq, bdp, index);
if (xdp_prog) {
xdp_buff_clear_frags_flag(&xdp);
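
The fec_main.c change reserves a replacement page before the received buffer is handed up, and drops the frame if that allocation fails so the ring never loses its buffer. A standalone sketch of the pattern; the allocator, stats, and delivery callback are stand-ins, not the fec driver API:

#include <stdbool.h>
#include <stdlib.h>

struct rx_slot { void *page; };
struct stats   { unsigned long rx_dropped; };

/* Stand-in for page_pool_dev_alloc_pages(): may fail under memory pressure. */
static void *alloc_page_stub(void) { return malloc(4096); }

/* Refill the descriptor slot; only consume the old buffer if refill worked. */
static bool receive_one(struct rx_slot *slot, struct stats *st,
			void (*deliver)(void *page))
{
	void *old = slot->page;
	void *fresh = alloc_page_stub();

	if (!fresh) {
		st->rx_dropped++;  /* keep the old page in the ring, drop frame */
		return false;
	}
	slot->page = fresh;        /* ring always owns a valid buffer */
	deliver(old);              /* now safe to pass the old page up the stack */
	return true;
}

static void deliver_stub(void *page) { free(page); }

int main(void)
{
	struct rx_slot slot = { .page = malloc(4096) };
	struct stats st = { 0 };

	receive_one(&slot, &st, deliver_stub);
	free(slot.page);
	return (int)st.rx_dropped;
}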
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 8a8f6ab12a98..533e659b15b3 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -2241,14 +2241,18 @@ static void gve_service_task(struct work_struct *work)
static void gve_set_netdev_xdp_features(struct gve_priv *priv)
{
+ xdp_features_t xdp_features;
+
if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
- priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ xdp_features = NETDEV_XDP_ACT_BASIC;
+ xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+ xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
+ xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
} else {
- priv->dev->xdp_features = 0;
+ xdp_features = 0;
}
+
+ xdp_set_features_flag(priv->dev, xdp_features);
}
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 710a8f9f2248..12ba380eb701 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -916,9 +916,6 @@ struct hnae3_handle {
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
- /* protects concurrent contention between debugfs commands */
- struct mutex dbgfs_lock;
- char **dbgfs_buf;
/* Network interface message level enabled bits */
u32 msg_enable;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 807eb3bbb11c..9bbece25552b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1260,69 +1260,55 @@ static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
- struct hns3_dbg_data *dbg_data = filp->private_data;
+ char *buf = filp->private_data;
+
+ return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+static int hns3_dbg_open(struct inode *inode, struct file *filp)
+{
+ struct hns3_dbg_data *dbg_data = inode->i_private;
struct hnae3_handle *handle = dbg_data->handle;
struct hns3_nic_priv *priv = handle->priv;
- ssize_t size = 0;
- char **save_buf;
- char *read_buf;
u32 index;
+ char *buf;
int ret;
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EBUSY;
+
ret = hns3_dbg_get_cmd_index(dbg_data, &index);
if (ret)
return ret;
- mutex_lock(&handle->dbgfs_lock);
- save_buf = &handle->dbgfs_buf[index];
-
- if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
- test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
- ret = -EBUSY;
- goto out;
- }
-
- if (*save_buf) {
- read_buf = *save_buf;
- } else {
- read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
- if (!read_buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* save the buffer addr until the last read operation */
- *save_buf = read_buf;
-
- /* get data ready for the first time to read */
- ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
- read_buf, hns3_dbg_cmd[index].buf_len);
- if (ret)
- goto out;
- }
+ buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- size = simple_read_from_buffer(buffer, count, ppos, read_buf,
- strlen(read_buf));
- if (size > 0) {
- mutex_unlock(&handle->dbgfs_lock);
- return size;
+ ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
+ buf, hns3_dbg_cmd[index].buf_len);
+ if (ret) {
+ kvfree(buf);
+ return ret;
}
-out:
- /* free the buffer for the last read operation */
- if (*save_buf) {
- kvfree(*save_buf);
- *save_buf = NULL;
- }
+ filp->private_data = buf;
+ return 0;
+}
- mutex_unlock(&handle->dbgfs_lock);
- return ret;
+static int hns3_dbg_release(struct inode *inode, struct file *filp)
+{
+ kvfree(filp->private_data);
+ filp->private_data = NULL;
+ return 0;
}
static const struct file_operations hns3_dbg_fops = {
.owner = THIS_MODULE,
- .open = simple_open,
+ .open = hns3_dbg_open,
.read = hns3_dbg_read,
+ .release = hns3_dbg_release,
};
static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
@@ -1379,13 +1365,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
int ret;
u32 i;
- handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
- ARRAY_SIZE(hns3_dbg_cmd),
- sizeof(*handle->dbgfs_buf),
- GFP_KERNEL);
- if (!handle->dbgfs_buf)
- return -ENOMEM;
-
hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
debugfs_create_dir(name, hns3_dbgfs_root);
handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
@@ -1395,8 +1374,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
debugfs_create_dir(hns3_dbg_dentry[i].name,
handle->hnae3_dbgfs);
- mutex_init(&handle->dbgfs_lock);
-
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
@@ -1425,24 +1402,13 @@ int hns3_dbg_init(struct hnae3_handle *handle)
out:
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
- mutex_destroy(&handle->dbgfs_lock);
return ret;
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
{
- u32 i;
-
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
- if (handle->dbgfs_buf[i]) {
- kvfree(handle->dbgfs_buf[i]);
- handle->dbgfs_buf[i] = NULL;
- }
-
- mutex_destroy(&handle->dbgfs_lock);
}
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
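
The debugfs rework above replaces the cached, mutex-guarded per-command buffer with a buffer allocated in open(), served by simple_read_from_buffer(), and freed in release(), so each reader owns its own snapshot. A userspace sketch of the same ownership pattern, with stand-in names rather than the hns3 API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct file_ctx { char *buf; };   /* stand-in for filp->private_data */

/* open(): snapshot the data once; the reader owns the copy. */
static int dbg_open(struct file_ctx *f, const char *snapshot)
{
	f->buf = strdup(snapshot);
	return f->buf ? 0 : -1;   /* -ENOMEM in the kernel version */
}

/* read(): serve from the private copy; no shared state, no lock needed. */
static size_t dbg_read(struct file_ctx *f, char *out, size_t len, size_t *pos)
{
	size_t avail = strlen(f->buf);
	size_t n = (*pos >= avail) ? 0 : (len < avail - *pos ? len : avail - *pos);

	memcpy(out, f->buf + *pos, n);
	*pos += n;
	return n;
}

/* release(): the copy dies with the file handle. */
static void dbg_release(struct file_ctx *f)
{
	free(f->buf);
	f->buf = NULL;
}

int main(void)
{
	struct file_ctx f;
	char out[8];
	size_t pos = 0;

	if (dbg_open(&f, "queue stats\n"))
		return 1;
	for (size_t n; (n = dbg_read(&f, out, sizeof(out) - 1, &pos)) > 0; ) {
		out[n] = '\0';
		fputs(out, stdout);
	}
	dbg_release(&f);
	return 0;
}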
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 43377a7b2426..a7e3b22f641c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2452,7 +2452,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
return ret;
}
- netdev->features = features;
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 05942fa78b11..db7845009252 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -6,6 +6,7 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -3574,6 +3575,17 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
return ret;
}
+static void hclge_set_reset_pending(struct hclge_dev *hdev,
+ enum hnae3_reset_type reset_type)
+{
+ /* When an incorrect reset type is executed, the get_reset_level
+ * function generates the HNAE3_NONE_RESET flag. As a result, this
+ * type does not need to be set pending.
+ */
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &hdev->reset_pending);
+}
+
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
@@ -3594,7 +3606,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
- set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
@@ -3604,7 +3616,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
- set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
@@ -3759,7 +3771,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
HCLGE_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
- 0, hdev->misc_vector.name, hdev);
+ IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -4052,7 +4064,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF reset requested\n");
/* schedule again to check later */
- set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
hclge_reset_task_schedule(hdev);
break;
default:
@@ -4086,6 +4098,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
clear_bit(HNAE3_FLR_RESET, addr);
}
+ clear_bit(HNAE3_NONE_RESET, addr);
+
if (hdev->reset_type != HNAE3_NONE_RESET &&
rst_level < hdev->reset_type)
return HNAE3_NONE_RESET;
@@ -4227,7 +4241,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
return false;
} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
hdev->rst_stats.reset_fail_cnt++;
- set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, hdev->reset_type);
dev_info(&hdev->pdev->dev,
"re-schedule reset task(%u)\n",
hdev->rst_stats.reset_fail_cnt);
@@ -4470,8 +4484,20 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
+#define HCLGE_SUPPORT_RESET_TYPE \
+ (BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
+ BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
+
struct hclge_dev *hdev = ae_dev->priv;
+ if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
+ /* Prevent a reset from being triggered by hclge_reset_event() */
+ set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+ dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
+ rst_type);
+ return;
+ }
+
set_bit(rst_type, &hdev->default_reset_request);
}
@@ -11881,9 +11907,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_init_rxd_adv_layout(hdev);
- /* Enable MISC vector(vector0) */
- hclge_enable_vector(&hdev->misc_vector, true);
-
ret = hclge_init_wol(hdev);
if (ret)
dev_warn(&pdev->dev,
@@ -11896,6 +11919,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
+ /* Enable MISC vector(vector0) */
+ enable_irq(hdev->misc_vector.vector_irq);
+ hclge_enable_vector(&hdev->misc_vector, true);
+
dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -12301,7 +12328,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
- synchronize_irq(hdev->misc_vector.vector_irq);
+ disable_irq(hdev->misc_vector.vector_irq);
/* Disable all hw interrupts */
hclge_config_mac_tnl_int(hdev, false);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 5505caea88e9..bab16c2191b2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -58,6 +58,9 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
struct hclge_dev *hdev = vport->back;
struct hclge_ptp *ptp = hdev->ptp;
+ if (!ptp)
+ return false;
+
if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
ptp->tx_skipped++;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
index 43c1c18fa81f..8c057192aae6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
@@ -510,9 +510,9 @@ out:
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
struct hnae3_knic_private_info *kinfo)
{
-#define HCLGE_RING_REG_OFFSET 0x200
#define HCLGE_RING_INT_REG_OFFSET 0x4
+ struct hnae3_queue *tqp;
int i, j, reg_num;
int data_num_sum;
u32 *reg = data;
@@ -533,10 +533,11 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
reg_num = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < kinfo->num_tqps; j++) {
reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
+ tqp = kinfo->tqp[j];
for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGE_RING_REG_OFFSET * j);
+ *reg++ = readl_relaxed(tqp->io_base -
+ HCLGE_TQP_REG_OFFSET +
+ ring_reg_addr_list[i]);
}
data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 2f6ffb88e700..163c6e59ea4c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1393,6 +1393,17 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
return ret;
}
+static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
+ enum hnae3_reset_type reset_type)
+{
+ /* When an incorrect reset type is executed, the get_reset_level
+ * function generates the HNAE3_NONE_RESET flag. As a result, this
+ * type does not need to be set pending.
+ */
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &hdev->reset_pending);
+}
+
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US 20000
@@ -1542,7 +1553,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
hdev->rst_stats.rst_fail_cnt);
if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
- set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, hdev->reset_type);
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
@@ -1662,6 +1673,8 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
clear_bit(HNAE3_FLR_RESET, addr);
}
+ clear_bit(HNAE3_NONE_RESET, addr);
+
return rst_level;
}
@@ -1671,14 +1684,15 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
struct hclgevf_dev *hdev = ae_dev->priv;
- dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
-
if (hdev->default_reset_request)
hdev->reset_level =
hclgevf_get_reset_level(&hdev->default_reset_request);
else
hdev->reset_level = HNAE3_VF_FUNC_RESET;
+ dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
+ hdev->reset_level);
+
/* reset of this VF requested */
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
@@ -1689,8 +1703,20 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
+#define HCLGEVF_SUPPORT_RESET_TYPE \
+ (BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
+ BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
+ BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
+
struct hclgevf_dev *hdev = ae_dev->priv;
+ if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
+ /* Prevent a reset from being triggered by hclge_reset_event() */
+ set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+ dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
+ rst_type);
+ return;
+ }
set_bit(rst_type, &hdev->default_reset_request);
}
@@ -1847,14 +1873,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
*/
if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
/* prepare for full reset of stack + pcie interface */
- set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
/* "defer" schedule the reset task again */
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
} else {
hdev->reset_attempts++;
- set_bit(hdev->reset_level, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, hdev->reset_level);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
}
hclgevf_reset_task_schedule(hdev);
@@ -1977,7 +2003,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
- set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
@@ -2287,6 +2313,8 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
+ /* timer needs to be initialized before misc irq */
+ timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
mutex_init(&hdev->mbx_resp.mbx_mutex);
sema_init(&hdev->reset_sem, 1);
@@ -2986,7 +3014,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
HCLGEVF_DRIVER_NAME);
hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
- timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 6db415d8b917..7d9d9dbc7560 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -123,10 +123,10 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle)
void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
void *data)
{
-#define HCLGEVF_RING_REG_OFFSET 0x200
#define HCLGEVF_RING_INT_REG_OFFSET 0x4
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_queue *tqp;
int i, j, reg_um;
u32 *reg = data;
@@ -147,10 +147,11 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
reg_um = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
+ tqp = &hdev->htqp[j].q;
for (i = 0; i < reg_um; i++)
- *reg++ = hclgevf_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGEVF_RING_REG_OFFSET * j);
+ *reg++ = readl_relaxed(tqp->io_base -
+ HCLGEVF_TQP_REG_OFFSET +
+ ring_reg_addr_list[i]);
}
reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 1489a8ceec51..46f9726d9a8a 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1665,6 +1665,7 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
u8 global_scid[2];
u8 phy_scid[2];
@@ -2264,6 +2265,8 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
+#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ GENMASK(30, 0)
+
/* Get CGU abilities command response data structure (indirect 0x0C61) */
struct ice_aqc_get_cgu_abilities {
u8 num_inputs;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 496d86cbd13f..532024f34ce4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4096,6 +4096,57 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
}
/**
+ * ice_get_phy_lane_number - Get PHY lane number for current adapter
+ * @hw: pointer to the hw struct
+ *
+ * Return: PHY lane number on success, negative error code otherwise.
+ */
+int ice_get_phy_lane_number(struct ice_hw *hw)
+{
+ struct ice_aqc_get_port_options_elem *options;
+ unsigned int lport = 0;
+ unsigned int lane;
+ int err;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) {
+ u8 options_count = ICE_AQC_PORT_OPT_MAX;
+ u8 speed, active_idx, pending_idx;
+ bool active_valid, pending_valid;
+
+ err = ice_aq_get_port_options(hw, options, &options_count, lane,
+ true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (err)
+ goto err;
+
+ if (!active_valid)
+ continue;
+
+ speed = options[active_idx].max_lane_speed;
+ /* If we don't get speed for this lane, it's unoccupied */
+ if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G)
+ continue;
+
+ if (hw->pf_id == lport) {
+ kfree(options);
+ return lane;
+ }
+
+ lport++;
+ }
+
+ /* PHY lane not found */
+ err = -ENXIO;
+err:
+ kfree(options);
+ return err;
+}
+
+/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
* @lport: bits [7:0] = logical port, bit [8] = logical port valid
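
The new ice_get_phy_lane_number() walks the lanes in order, skips the ones that report no active speed (unoccupied), and returns the lane whose occupied index matches pf_id. A small sketch of that counting scheme over placeholder data instead of the admin-queue call:

#include <stdio.h>

#define MAX_LANES 8

/* 0 means the lane reports no active speed, i.e. it is unoccupied. */
static const int lane_speed[MAX_LANES] = { 25, 0, 25, 0, 100, 0, 0, 0 };

/* Return the physical lane backing logical port pf_id, or -1 if not found. */
static int phy_lane_for_pf(unsigned int pf_id)
{
	unsigned int lport = 0;

	for (unsigned int lane = 0; lane < MAX_LANES; lane++) {
		if (!lane_speed[lane])
			continue;          /* unoccupied lane, not counted */
		if (lport == pf_id)
			return (int)lane;  /* pf_id-th occupied lane */
		lport++;
	}
	return -1;
}

int main(void)
{
	/* PF 1 maps to lane 2 here, since lane 1 is unoccupied. */
	printf("pf 1 -> lane %d\n", phy_lane_for_pf(1));
	return 0;
}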
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 27208a60cece..fe6f88cfd948 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -193,6 +193,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option);
+int ice_get_phy_lane_number(struct ice_hw *hw);
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index d5ad6d84007c..38e151c7ea23 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -2065,6 +2065,18 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
}
/**
+ * ice_dpll_phase_range_set - initialize phase adjust range helper
+ * @range: pointer to phase adjust range struct to be initialized
+ * @phase_adj: a value to be used as min(-)/max(+) boundary
+ */
+static void ice_dpll_phase_range_set(struct dpll_pin_phase_adjust_range *range,
+ u32 phase_adj)
+{
+ range->min = -phase_adj;
+ range->max = phase_adj;
+}
+
+/**
* ice_dpll_init_info_pins_generic - initializes generic pins info
* @pf: board private structure
* @input: if input pins initialized
@@ -2105,8 +2117,8 @@ static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input)
for (i = 0; i < pin_num; i++) {
pins[i].idx = i;
pins[i].prop.board_label = labels[i];
- pins[i].prop.phase_range.min = phase_adj_max;
- pins[i].prop.phase_range.max = -phase_adj_max;
+ ice_dpll_phase_range_set(&pins[i].prop.phase_range,
+ phase_adj_max);
pins[i].prop.capabilities = cap;
pins[i].pf = pf;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
@@ -2152,6 +2164,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
struct ice_hw *hw = &pf->hw;
struct ice_dpll_pin *pins;
unsigned long caps;
+ u32 phase_adj_max;
u8 freq_supp_num;
bool input;
@@ -2159,11 +2172,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
case ICE_DPLL_PIN_TYPE_INPUT:
pins = pf->dplls.inputs;
num_pins = pf->dplls.num_inputs;
+ phase_adj_max = pf->dplls.input_phase_adj_max;
input = true;
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
pins = pf->dplls.outputs;
num_pins = pf->dplls.num_outputs;
+ phase_adj_max = pf->dplls.output_phase_adj_max;
input = false;
break;
default:
@@ -2188,19 +2203,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
return ret;
caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
- pins[i].prop.phase_range.min =
- pf->dplls.input_phase_adj_max;
- pins[i].prop.phase_range.max =
- -pf->dplls.input_phase_adj_max;
} else {
- pins[i].prop.phase_range.min =
- pf->dplls.output_phase_adj_max;
- pins[i].prop.phase_range.max =
- -pf->dplls.output_phase_adj_max;
ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
if (ret)
return ret;
}
+ ice_dpll_phase_range_set(&pins[i].prop.phase_range,
+ phase_adj_max);
pins[i].prop.capabilities = caps;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
if (ret)
@@ -2308,8 +2317,10 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
dp->dpll_idx = abilities.pps_dpll_idx;
d->num_inputs = abilities.num_inputs;
d->num_outputs = abilities.num_outputs;
- d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj);
- d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj);
+ d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj) &
+ ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
+ d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj) &
+ ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
alloc_size = sizeof(*d->inputs) * d->num_inputs;
d->inputs = kzalloc(alloc_size, GFP_KERNEL);
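
The DPLL change masks the firmware-reported maximum with ICE_AQC_GET_CGU_MAX_PHASE_ADJ (clearing bit 31) and then applies a symmetric [-max, +max] phase range, fixing the previously swapped min/max assignment. A short sketch of those two steps with a placeholder mask value:

#include <stdint.h>
#include <stdio.h>

/* Placeholder for ICE_AQC_GET_CGU_MAX_PHASE_ADJ: keep bits 30..0 only. */
#define MAX_PHASE_ADJ_MASK 0x7fffffffu

struct phase_range { int64_t min, max; };

/* Mirrors ice_dpll_phase_range_set(): symmetric range around zero. */
static void phase_range_set(struct phase_range *r, uint32_t phase_adj)
{
	r->min = -(int64_t)phase_adj;
	r->max = (int64_t)phase_adj;
}

int main(void)
{
	/* A raw firmware value with the (invalid) top bit set. */
	uint32_t raw = 0x80001000u;
	struct phase_range r;

	phase_range_set(&r, raw & MAX_PHASE_ADJ_MASK);
	printf("min=%lld max=%lld\n", (long long)r.min, (long long)r.max);
	return 0;
}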
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 0ab35607e5d5..89fa3d53d317 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1144,7 +1144,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
- ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
+ ice_ptp_link_change(pf, link_up);
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
@@ -6790,7 +6790,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
- ice_ptp_link_change(pf, pf->hw.pf_id, true);
+ ice_ptp_link_change(pf, true);
}
/* Perform an initial read of the statistics registers now to
@@ -7260,7 +7260,7 @@ int ice_down(struct ice_vsi *vsi)
if (vsi->netdev) {
vlan_err = ice_vsi_del_vlan_zero(vsi);
- ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
+ ice_ptp_link_change(vsi->back, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index a999fface272..efd770dfec44 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1388,10 +1388,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
/**
* ice_ptp_link_change - Reconfigure PTP after link status change
* @pf: Board private structure
- * @port: Port for which the PHY start is set
* @linkup: Link is up or down
*/
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
@@ -1399,14 +1398,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
if (pf->ptp.state != ICE_PTP_READY)
return;
- if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
- return;
-
ptp_port = &pf->ptp.port;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- port *= 2;
- if (WARN_ON_ONCE(ptp_port->port_num != port))
- return;
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
@@ -3164,10 +3156,17 @@ void ice_ptp_init(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
- int err;
+ int lane_num, err;
ptp->state = ICE_PTP_INITIALIZING;
+ lane_num = ice_get_phy_lane_number(hw);
+ if (lane_num < 0) {
+ err = lane_num;
+ goto err_exit;
+ }
+
+ ptp->port.port_num = (u8)lane_num;
ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3188,10 +3187,6 @@ void ice_ptp_init(struct ice_pf *pf)
if (err)
goto err_exit;
- ptp->port.port_num = hw->pf_id;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- ptp->port.port_num = hw->pf_id * 2;
-
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
goto err_exit;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 824e73b677a4..c490d98fd9c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -310,7 +310,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf,
enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
@@ -358,7 +358,7 @@ static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
-static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 585ce200c60f..a8e57cf05a9c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -131,7 +131,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
.rx_offset = {
.serdes = 0xffffeb27, /* -10.42424 */
.no_fec = 0xffffcccd, /* -25.6 */
- .fc = 0xfffe0014, /* -255.96 */
+ .fc = 0xfffc557b, /* -469.26 */
.sfd = 0x4a4, /* 2.32 */
.bs_ds = 0x32 /* 0.0969697 */
}
@@ -761,9 +761,9 @@ const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
/* rx_desk_rsgb_par */
644531250, /* 644.53125 MHz Reed Solomon gearbox */
/* tx_desk_rsgb_pcs */
- 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ 390625000, /* 390.625 MHz Reed Solomon gearbox */
/* rx_desk_rsgb_pcs */
- 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ 390625000, /* 390.625 MHz Reed Solomon gearbox */
/* tx_fixed_delay */
1620,
/* pmd_adj_divisor */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 518893f23372..02e84f5b1d45 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -901,30 +901,45 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
*/
/**
+ * ice_ptp_get_dest_dev_e825 - get destination PHY for given port number
+ * @hw: pointer to the HW struct
+ * @port: destination port
+ *
+ * Return: destination sideband queue PHY device.
+ */
+static enum ice_sbq_msg_dev ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
+ u8 port)
+{
+ /* On a single complex E825, PHY 0 is always destination device phy_0
+ * and PHY 1 is phy_0_peer.
+ */
+ if (port >= hw->ptp.ports_per_phy)
+ return eth56g_phy_1;
+ else
+ return eth56g_phy_0;
+}
+
+/**
* ice_write_phy_eth56g - Write a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
* @val: Value to write
*
* Return: 0 on success, other error codes when failed to write to PHY
*/
-static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 val)
+static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_wr,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr),
+ .data = val
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_wr;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = val;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
-
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
@@ -935,41 +950,36 @@ static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
/**
* ice_read_phy_eth56g - Read a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
 * @val: Value to read
*
* Return: 0 on success, other error codes when failed to read from PHY
*/
-static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 *val)
+static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_rd,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr)
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_rd;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = 0;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
- if (err) {
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
- return err;
- }
-
- *val = phy_msg.data;
+ else
+ *val = msg.data;
- return 0;
+ return err;
}
/**
* ice_phy_res_address_eth56g - Calculate a PHY port register address
- * @port: Port number to be written
+ * @hw: pointer to the HW struct
+ * @lane: Lane number to be written
* @res_type: resource type (register/memory)
* @offset: Offset from PHY port register base
* @addr: The result address
@@ -978,17 +988,19 @@ static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
* * %0 - success
* * %EINVAL - invalid port number or resource type
*/
-static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
- u32 offset, u32 *addr)
+static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane,
+ enum eth56g_res_type res_type,
+ u32 offset,
+ u32 *addr)
{
- u8 lane = port % ICE_PORTS_PER_QUAD;
- u8 phy = ICE_GET_QUAD_NUM(port);
-
if (res_type >= NUM_ETH56G_PHY_RES)
return -EINVAL;
- *addr = eth56g_phy_res[res_type].base[phy] +
+ /* Lanes 4..7 are in fact 0..3 on a second PHY */
+ lane %= hw->ptp.ports_per_phy;
+ *addr = eth56g_phy_res[res_type].base[0] +
lane * eth56g_phy_res[res_type].step + offset;
+
return 0;
}
@@ -1008,19 +1020,17 @@ static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_write_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_write_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1039,19 +1049,17 @@ static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 *val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_read_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_read_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1201,6 +1209,56 @@ static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
}
/**
+ * ice_write_quad_ptp_reg_eth56g - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+
+ return ice_write_phy_eth56g(hw, port, addr, val);
+}
+
+/**
+ * ice_read_quad_ptp_reg_eth56g - Read a PHY quad register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to read
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 *val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+
+ return ice_read_phy_eth56g(hw, port, addr, val);
+}
+
+/**
* ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 64bit register
@@ -1919,7 +1977,6 @@ ice_phy_get_speed_eth56g(struct ice_link_status *li)
*/
static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 val;
int err;
@@ -1934,8 +1991,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
case ICE_ETH56G_LNK_SPD_1G:
case ICE_ETH56G_LNK_SPD_2_5G:
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, &val);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1946,8 +2003,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M,
ICE_ETH56G_NOMINAL_TX_THRESH);
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1988,50 +2045,47 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
*/
int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
- u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1);
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr, val, peer_delay;
bool enable, sfd_ena;
- u32 val, peer_delay;
int err;
enable = hw->ptp.phy.eth56g.onestep_ena;
peer_delay = hw->ptp.phy.eth56g.peer_delay;
sfd_ena = hw->ptp.phy.eth56g.sfd_ena;
- /* PHY_PTP_1STEP_CONFIG */
- err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val);
+ addr = PHY_PTP_1STEP_CONFIG;
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
if (enable)
- val |= blk_port;
+ val |= BIT(quad_lane);
else
- val &= ~blk_port;
+ val &= ~BIT(quad_lane);
val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M);
- err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_PTP_1STEP_PEER_DELAY */
+ addr = PHY_PTP_1STEP_PEER_DELAY(quad_lane);
val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay);
if (peer_delay)
val |= PHY_PTP_1STEP_PD_ADD_PD_M;
val |= PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
val &= ~PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_MAC_XIF_MODE */
- err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val);
+ addr = PHY_MAC_XIF_MODE;
+ err = ice_read_mac_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
@@ -2051,7 +2105,7 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) |
FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena);
- return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val);
+ return ice_write_mac_reg_eth56g(hw, port, addr, val);
}
/**
@@ -2093,21 +2147,22 @@ static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs,
bool fc, bool rs,
enum ice_eth56g_link_spd spd)
{
- u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1);
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 bitslip;
int err;
if (!bs || rs)
return 0;
- if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G)
+ if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) {
err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP,
&bitslip);
- else
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_REG_SD_BIT_SLIP(port_offset),
- &bitslip);
+ } else {
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr;
+
+ addr = PHY_REG_SD_BIT_SLIP(quad_lane);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &bitslip);
+ }
if (err)
return 0;
@@ -2667,59 +2722,29 @@ static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port,
}
/**
- * ice_is_muxed_topo - detect breakout 2x50G topology for E825C
- * @hw: pointer to the HW struct
- *
- * Return: true if it's 2x50 breakout topology, false otherwise
- */
-static bool ice_is_muxed_topo(struct ice_hw *hw)
-{
- u8 link_topo;
- bool mux;
- u32 val;
-
- val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
- mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val);
- val = rd32(hw, GLGEN_MAC_LINK_TOPO);
- link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val);
-
- return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS);
-}
-
-/**
- * ice_ptp_init_phy_e825c - initialize PHY parameters
+ * ice_ptp_init_phy_e825 - initialize PHY parameters
* @hw: pointer to the HW struct
*/
-static void ice_ptp_init_phy_e825c(struct ice_hw *hw)
+static void ice_ptp_init_phy_e825(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
struct ice_eth56g_params *params;
- u8 phy;
+ u32 phy_rev;
+ int err;
ptp->phy_model = ICE_PHY_ETH56G;
params = &ptp->phy.eth56g;
params->onestep_ena = false;
params->peer_delay = 0;
params->sfd_ena = false;
- params->phy_addr[0] = eth56g_phy_0;
- params->phy_addr[1] = eth56g_phy_1;
params->num_phys = 2;
ptp->ports_per_phy = 4;
ptp->num_lports = params->num_phys * ptp->ports_per_phy;
ice_sb_access_ena_eth56g(hw, true);
- for (phy = 0; phy < params->num_phys; phy++) {
- u32 phy_rev;
- int err;
-
- err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev);
- if (err || phy_rev != PHY_REVISION_ETH56G) {
- ptp->phy_model = ICE_PHY_UNSUP;
- return;
- }
- }
-
- ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw);
+ err = ice_read_phy_eth56g(hw, hw->pf_id, PHY_REG_REVISION, &phy_rev);
+ if (err || phy_rev != PHY_REVISION_ETH56G)
+ ptp->phy_model = ICE_PHY_UNSUP;
}
/* E822 family functions
@@ -2738,10 +2763,9 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
struct ice_sbq_msg_input *msg, u8 port,
u16 offset)
{
- int phy_port, phy, quadtype;
+ int phy_port, quadtype;
phy_port = port % hw->ptp.ports_per_phy;
- phy = port / hw->ptp.ports_per_phy;
quadtype = ICE_GET_QUAD_NUM(port) %
ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy);
@@ -2753,12 +2777,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
}
- if (phy == 0)
- msg->dest_dev = rmn_0;
- else if (phy == 1)
- msg->dest_dev = rmn_1;
- else
- msg->dest_dev = rmn_2;
+ msg->dest_dev = rmn_0;
}
/**
@@ -5478,7 +5497,7 @@ void ice_ptp_init_hw(struct ice_hw *hw)
else if (ice_is_e810(hw))
ice_ptp_init_phy_e810(ptp);
else if (ice_is_e825c(hw))
- ice_ptp_init_phy_e825c(hw);
+ ice_ptp_init_phy_e825(hw);
else
ptp->phy_model = ICE_PHY_UNSUP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index adb168860711..4a9ef722635f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -850,7 +850,6 @@ struct ice_mbx_data {
struct ice_eth56g_params {
u8 num_phys;
- u8 phy_addr[2];
bool onestep_ena;
bool sfd_ena;
u32 peer_delay;
@@ -881,7 +880,6 @@ struct ice_ptp_hw {
union ice_phy_params phy;
u8 num_lports;
u8 ports_per_phy;
- bool is_2x50g_muxed_topo;
};
/* Port hardware description */
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 9fae8bdec2a7..1613b562d17c 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -68,6 +68,10 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw)
u32 eecd = rd32(IGC_EECD);
u16 size;
+ /* failed to read reg and got all F's */
+ if (!(~eecd))
+ return -ENXIO;
+
size = FIELD_GET(IGC_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
@@ -221,6 +225,8 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
/* NVM initialization */
ret_val = igc_init_nvm_params_base(hw);
+ if (ret_val)
+ goto out;
switch (hw->mac.type) {
case igc_i225:
ret_val = igc_init_nvm_params_i225(hw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 6bd8a18e3af3..e733b81e18a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1013,6 +1013,7 @@ static void cmd_work_handler(struct work_struct *work)
complete(&ent->done);
}
up(&cmd->vars.sem);
+ complete(&ent->slotted);
return;
}
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index ca92e518be76..1baf8933a07c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -724,6 +724,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
/* check esn */
if (x->props.flags & XFRM_STATE_ESN)
mlx5e_ipsec_update_esn_state(sa_entry);
+ else
+ /* According to RFC4303, section "3.3.3. Sequence Number Generation",
+ * the first packet sent using a given SA will contain a sequence
+ * number of 1.
+ */
+ sa_entry->esn_state.esn = 1;
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
@@ -768,9 +774,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
MLX5_IPSEC_RESCHED);
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
- x->props.mode == XFRM_MODE_TUNNEL)
- xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
- MLX5E_IPSEC_TUNNEL_SA);
+ x->props.mode == XFRM_MODE_TUNNEL) {
+ xa_lock_bh(&ipsec->sadb);
+ __xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
+ MLX5E_IPSEC_TUNNEL_SA);
+ xa_unlock_bh(&ipsec->sadb);
+ }
out:
x->xso.offload_handle = (unsigned long)sa_entry;
@@ -797,7 +806,6 @@ err_xfrm:
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *old;
@@ -806,12 +814,6 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
WARN_ON(old != sa_entry);
-
- if (attrs->mode == XFRM_MODE_TUNNEL &&
- attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- /* Make sure that no ARP requests are running in parallel */
- flush_workqueue(ipsec->wq);
-
}
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index e51b03d4c717..57861d34d46f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -1718,23 +1718,21 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
- else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
-
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
break;
case XFRM_DEV_OFFLOAD_PACKET:
- if (attrs->reqid)
- setup_fte_reg_c4(spec, attrs->reqid);
+ setup_fte_reg_c4(spec, attrs->reqid);
err = setup_pkt_reformat(ipsec, attrs, &flow_act);
if (err)
goto err_pkt_reformat;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 53cfa39188cb..820debf3fbbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -91,8 +91,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
- struct mlx5_accel_esp_xfrm_attrs *attrs)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
void *aso_ctx;
aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
@@ -120,8 +121,12 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
* active.
*/
MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
- if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
+ if (!attrs->replay_esn.trigger)
+ MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
+ sa_entry->esn_state.esn);
+ }
if (attrs->lft.hard_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
@@ -175,7 +180,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
res = &mdev->mlx5e_res.hw_objs;
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
+ mlx5e_ipsec_packet_setup(obj, res->pdn, sa_entry);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 2eabfcc247c6..0ce999706d41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2709,6 +2709,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
break;
case MLX5_FLOW_NAMESPACE_RDMA_TX:
root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_BYPASS_PRIO;
break;
case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
root_ns = steering->rdma_rx_root_ns;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index ab2717012b79..39e80704b1c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -530,7 +530,7 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
set_tt_map(port_sel, hash_type);
err = mlx5_lag_create_definers(ldev, hash_type, ports);
if (err)
- return err;
+ goto clear_port_sel;
if (port_sel->tunnel) {
err = mlx5_lag_create_inner_ttc_table(ldev);
@@ -549,6 +549,8 @@ destroy_inner:
mlx5_destroy_ttc_table(port_sel->inner.ttc);
destroy_definers:
mlx5_lag_destroy_definers(ldev);
+clear_port_sel:
+ memset(port_sel, 0, sizeof(*port_sel));
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index a96be98be032..b96909fbeb12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -257,6 +257,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
return 0;
esw_err:
+ mlx5_sf_function_id_erase(table, sf);
mlx5_sf_free(table, sf);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
index 1bed75eca97d..740b719e7072 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -382,6 +382,7 @@ err_alloc_bfreg:
bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
{
+ struct mutex *wc_state_lock = &mdev->wc_state_lock;
struct mlx5_core_dev *parent = NULL;
if (!MLX5_CAP_GEN(mdev, bf)) {
@@ -400,32 +401,31 @@ bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
*/
goto out;
- mutex_lock(&mdev->wc_state_lock);
-
- if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
- goto unlock;
-
#ifdef CONFIG_MLX5_SF
- if (mlx5_core_is_sf(mdev))
+ if (mlx5_core_is_sf(mdev)) {
parent = mdev->priv.parent_mdev;
+ wc_state_lock = &parent->wc_state_lock;
+ }
#endif
- if (parent) {
- mutex_lock(&parent->wc_state_lock);
+ mutex_lock(wc_state_lock);
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ goto unlock;
+
+ if (parent) {
mlx5_core_test_wc(parent);
mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
parent->wc_state);
mdev->wc_state = parent->wc_state;
- mutex_unlock(&parent->wc_state_lock);
+ } else {
+ mlx5_core_test_wc(mdev);
}
- mlx5_core_test_wc(mdev);
-
unlock:
- mutex_unlock(&mdev->wc_state_lock);
+ mutex_unlock(wc_state_lock);
out:
mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index 239b2258ec65..ea6214ca48e7 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -13,7 +13,6 @@ fbnic-y := fbnic_csr.o \
fbnic_ethtool.o \
fbnic_fw.o \
fbnic_hw_stats.o \
- fbnic_hwmon.o \
fbnic_irq.o \
fbnic_mac.o \
fbnic_netdev.o \
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index 706ae6104c8e..744eb0d95449 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -20,7 +20,6 @@ struct fbnic_dev {
struct device *dev;
struct net_device *netdev;
struct dentry *dbg_fbd;
- struct device *hwmon;
u32 __iomem *uc_addr0;
u32 __iomem *uc_addr4;
@@ -33,7 +32,6 @@ struct fbnic_dev {
struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
struct fbnic_fw_cap fw_cap;
- struct fbnic_fw_completion *cmpl_data;
/* Lock protecting Tx Mailbox queue to prevent possible races */
spinlock_t fw_tx_lock;
@@ -142,9 +140,6 @@ void fbnic_devlink_unregister(struct fbnic_dev *fbd);
int fbnic_fw_enable_mbx(struct fbnic_dev *fbd);
void fbnic_fw_disable_mbx(struct fbnic_dev *fbd);
-void fbnic_hwmon_register(struct fbnic_dev *fbd);
-void fbnic_hwmon_unregister(struct fbnic_dev *fbd);
-
int fbnic_pcs_irq_enable(struct fbnic_dev *fbd);
void fbnic_pcs_irq_disable(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index 7cd8841920e4..221faf8c6756 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -44,13 +44,6 @@ struct fbnic_fw_cap {
u8 link_fec;
};
-struct fbnic_fw_completion {
- struct {
- s32 millivolts;
- s32 millidegrees;
- } tsene;
-};
-
void fbnic_mbx_init(struct fbnic_dev *fbd);
void fbnic_mbx_clean(struct fbnic_dev *fbd);
void fbnic_mbx_poll(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
deleted file mode 100644
index bcd1086e3768..000000000000
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) Meta Platforms, Inc. and affiliates. */
-
-#include <linux/hwmon.h>
-
-#include "fbnic.h"
-#include "fbnic_mac.h"
-
-static int fbnic_hwmon_sensor_id(enum hwmon_sensor_types type)
-{
- if (type == hwmon_temp)
- return FBNIC_SENSOR_TEMP;
- if (type == hwmon_in)
- return FBNIC_SENSOR_VOLTAGE;
-
- return -EOPNOTSUPP;
-}
-
-static umode_t fbnic_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- if (type == hwmon_temp && attr == hwmon_temp_input)
- return 0444;
- if (type == hwmon_in && attr == hwmon_in_input)
- return 0444;
-
- return 0;
-}
-
-static int fbnic_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- struct fbnic_dev *fbd = dev_get_drvdata(dev);
- const struct fbnic_mac *mac = fbd->mac;
- int id;
-
- id = fbnic_hwmon_sensor_id(type);
- return id < 0 ? id : mac->get_sensor(fbd, id, val);
-}
-
-static const struct hwmon_ops fbnic_hwmon_ops = {
- .is_visible = fbnic_hwmon_is_visible,
- .read = fbnic_hwmon_read,
-};
-
-static const struct hwmon_channel_info *fbnic_hwmon_info[] = {
- HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
- HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
- NULL
-};
-
-static const struct hwmon_chip_info fbnic_chip_info = {
- .ops = &fbnic_hwmon_ops,
- .info = fbnic_hwmon_info,
-};
-
-void fbnic_hwmon_register(struct fbnic_dev *fbd)
-{
- if (!IS_REACHABLE(CONFIG_HWMON))
- return;
-
- fbd->hwmon = hwmon_device_register_with_info(fbd->dev, "fbnic",
- fbd, &fbnic_chip_info,
- NULL);
- if (IS_ERR(fbd->hwmon)) {
- dev_notice(fbd->dev,
- "Failed to register hwmon device %pe\n",
- fbd->hwmon);
- fbd->hwmon = NULL;
- }
-}
-
-void fbnic_hwmon_unregister(struct fbnic_dev *fbd)
-{
- if (!IS_REACHABLE(CONFIG_HWMON) || !fbd->hwmon)
- return;
-
- hwmon_device_unregister(fbd->hwmon);
- fbd->hwmon = NULL;
-}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 80b82ff12c4d..7b654d0a6dac 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -686,27 +686,6 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
MAC_STAT_TX_BROADCAST);
}
-static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, long *val)
-{
- struct fbnic_fw_completion fw_cmpl;
- s32 *sensor;
-
- switch (id) {
- case FBNIC_SENSOR_TEMP:
- sensor = &fw_cmpl.tsene.millidegrees;
- break;
- case FBNIC_SENSOR_VOLTAGE:
- sensor = &fw_cmpl.tsene.millivolts;
- break;
- default:
- return -EINVAL;
- }
-
- *val = *sensor;
-
- return 0;
-}
-
static const struct fbnic_mac fbnic_mac_asic = {
.init_regs = fbnic_mac_init_regs,
.pcs_enable = fbnic_pcs_enable_asic,
@@ -716,7 +695,6 @@ static const struct fbnic_mac fbnic_mac_asic = {
.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
.link_down = fbnic_mac_link_down_asic,
.link_up = fbnic_mac_link_up_asic,
- .get_sensor = fbnic_mac_get_sensor_asic,
};
/**
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index 05a591653e09..476239a9d381 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -47,11 +47,6 @@ enum {
#define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1)
#define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1)
-enum fbnic_sensor_id {
- FBNIC_SENSOR_TEMP, /* Temp in millidegrees Centigrade */
- FBNIC_SENSOR_VOLTAGE, /* Voltage in millivolts */
-};
-
/* This structure defines the interface hooks for the MAC. The MAC hooks
* will be configured as a const struct provided with a set of function
* pointers.
@@ -88,8 +83,6 @@ struct fbnic_mac {
void (*link_down)(struct fbnic_dev *fbd);
void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
-
- int (*get_sensor)(struct fbnic_dev *fbd, int id, long *val);
};
int fbnic_mac_init(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 32702dc4a066..7ccf192f13d5 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -296,8 +296,6 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Capture snapshot of hardware stats so netdev can calculate delta */
fbnic_reset_hw_stats(fbd);
- fbnic_hwmon_register(fbd);
-
if (!fbd->dsn) {
dev_warn(&pdev->dev, "Reading serial number failed\n");
goto init_failure_mode;
@@ -360,7 +358,6 @@ static void fbnic_remove(struct pci_dev *pdev)
fbnic_netdev_free(fbd);
}
- fbnic_hwmon_unregister(fbd);
fbnic_dbg_fbd_exit(fbd);
fbnic_devlink_unregister(fbd);
fbnic_fw_disable_mbx(fbd);
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 2dc0c6ad54be..be95336ce089 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1656,9 +1656,9 @@ static int __init mana_driver_init(void)
static void __exit mana_driver_exit(void)
{
- debugfs_remove(mana_debugfs_root);
-
pci_unregister_driver(&mana_driver);
+
+ debugfs_remove(mana_debugfs_root);
}
module_init(mana_driver_init);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 9d97cd281f18..c03558adda91 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
map_id_full = be64_to_cpu(cbe->map_ptr);
map_id = map_id_full;
- if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ if (size_add(pkt_size, data_size) > INT_MAX ||
+ len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
return -EINVAL;
if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
return -EINVAL;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 739707a7b40f..8a3959bb2360 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -16,7 +16,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
-#include <linux/hwmon.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
@@ -5347,43 +5346,6 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
return false;
}
-static umode_t r8169_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- return 0444;
-}
-
-static int r8169_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- struct rtl8169_private *tp = dev_get_drvdata(dev);
- int val_raw;
-
- val_raw = phy_read_paged(tp->phydev, 0xbd8, 0x12) & 0x3ff;
- if (val_raw >= 512)
- val_raw -= 1024;
-
- *val = 1000 * val_raw / 2;
-
- return 0;
-}
-
-static const struct hwmon_ops r8169_hwmon_ops = {
- .is_visible = r8169_hwmon_is_visible,
- .read = r8169_hwmon_read,
-};
-
-static const struct hwmon_channel_info * const r8169_hwmon_info[] = {
- HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
- NULL
-};
-
-static const struct hwmon_chip_info r8169_hwmon_chip_info = {
- .ops = &r8169_hwmon_ops,
- .info = r8169_hwmon_info,
-};
-
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct rtl8169_private *tp;
@@ -5563,12 +5525,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* The temperature sensor is available from RTl8125B */
- if (IS_REACHABLE(CONFIG_HWMON) && tp->mac_version >= RTL_GIGA_MAC_VER_63)
- /* ignore errors */
- devm_hwmon_device_register_with_info(&pdev->dev, "nic_temp", tp,
- &r8169_hwmon_chip_info,
- NULL);
rc = register_netdev(dev);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index de7f11232593..c42c0516656b 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -1827,7 +1827,7 @@ static int rtase_alloc_msix(struct pci_dev *pdev, struct rtase_private *tp)
for (i = 0; i < tp->int_nums; i++) {
irq = pci_irq_vector(pdev, i);
- if (!irq) {
+ if (irq < 0) {
pci_disable_msix(pdev);
return irq;
}
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ac0f093f647a..bc395294a32d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2763,6 +2763,7 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
index 3827997d2132..dc903b846b1b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/module.h>
@@ -19,6 +20,8 @@ struct tegra_mgbe {
struct reset_control *rst_mac;
struct reset_control *rst_pcs;
+ u32 iommu_sid;
+
void __iomem *hv;
void __iomem *regs;
void __iomem *xpcs;
@@ -50,7 +53,6 @@ struct tegra_mgbe {
#define MGBE_WRAP_COMMON_INTR_ENABLE 0x8704
#define MAC_SBD_INTR BIT(2)
#define MGBE_WRAP_AXI_ASID0_CTRL 0x8400
-#define MGBE_SID 0x6
static int __maybe_unused tegra_mgbe_suspend(struct device *dev)
{
@@ -84,7 +86,7 @@ static int __maybe_unused tegra_mgbe_resume(struct device *dev)
writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
/* Program SID */
- writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+ writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS);
if ((value & XPCS_WRAP_UPHY_STATUS_TX_P_UP) == 0) {
@@ -241,6 +243,12 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
if (IS_ERR(mgbe->xpcs))
return PTR_ERR(mgbe->xpcs);
+ /* get controller's stream id from iommu property in device tree */
+ if (!tegra_dev_iommu_get_stream_id(mgbe->dev, &mgbe->iommu_sid)) {
+ dev_err(mgbe->dev, "failed to get iommu stream id\n");
+ return -EINVAL;
+ }
+
res.addr = mgbe->regs;
res.irq = irq;
@@ -346,7 +354,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
/* Program SID */
- writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+ writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
plat->flags |= STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 52e4e350b734..5cc72a91f220 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -127,15 +127,15 @@ struct cpsw_ale_dev_id {
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
- int idx, idx2;
+ int idx, idx2, index;
u32 hi_val = 0;
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be fetched exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
+ index = 2 - idx2; /* flip */
+ hi_val = ale_entry[index] << ((idx2 * 32) - start);
}
start -= idx * 32;
idx = 2 - idx; /* flip */
@@ -145,16 +145,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
u32 value)
{
- int idx, idx2;
+ int idx, idx2, index;
value &= BITMASK(bits);
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be set exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
- ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
+ index = 2 - idx2; /* flip */
+ ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32)));
+ ale_entry[index] |= (value >> ((idx2 * 32) - start));
}
start -= idx * 32;
idx = 2 - idx; /* flip */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 1bf9c38e4125..deaf670c160e 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -334,27 +334,25 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
timeout * 1000, false, wx, WX_MNG_MBOX_CTL);
+ buf[0] = rd32(wx, WX_MNG_MBOX);
+ if ((buf[0] & 0xff0000) >> 16 == 0x80) {
+ wx_err(wx, "Unknown FW command: 0x%x\n", buffer[0] & 0xff);
+ status = -EINVAL;
+ goto rel_out;
+ }
+
/* Check command completion */
if (status) {
- wx_dbg(wx, "Command has failed with no status valid.\n");
-
- buf[0] = rd32(wx, WX_MNG_MBOX);
- if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
- status = -EINVAL;
- goto rel_out;
- }
- if ((buf[0] & 0xff0000) >> 16 == 0x80) {
- wx_dbg(wx, "It's unknown cmd.\n");
- status = -EINVAL;
- goto rel_out;
- }
-
+ wx_err(wx, "Command has failed with no status valid.\n");
wx_dbg(wx, "write value:\n");
for (i = 0; i < dword_len; i++)
wx_dbg(wx, "%x ", buffer[i]);
wx_dbg(wx, "read value:\n");
for (i = 0; i < dword_len; i++)
wx_dbg(wx, "%x ", buf[i]);
+ wx_dbg(wx, "\ncheck: %x %x\n", buffer[0] & 0xff, ~buf[0] >> 24);
+
+ goto rel_out;
}
if (!return_data)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 0f4b02fe6f85..ae743991117c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2056,6 +2056,12 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EBUSY;
}
+ if (ecoalesce->rx_max_coalesced_frames > 255 ||
+ ecoalesce->tx_max_coalesced_frames > 255) {
+ NL_SET_ERR_MSG(extack, "frames must be less than 256");
+ return -EINVAL;
+ }
+
if (ecoalesce->rx_max_coalesced_frames)
lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
if (ecoalesce->rx_coalesce_usecs)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 89a996ad8cd0..fbabada7d3ba 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1524,8 +1524,8 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
goto out_encap;
}
- gn = net_generic(dev_net(dev), gtp_net_id);
- list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+ gn = net_generic(src_net, gtp_net_id);
+ list_add(&gtp->list, &gn->gtp_dev_list);
dev->priv_destructor = gtp_destructor;
netdev_dbg(dev, "registered new GTP interface\n");
@@ -1551,7 +1551,7 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head)
hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
- list_del_rcu(&gtp->list);
+ list_del(&gtp->list);
unregister_netdevice_queue(dev, head);
}
@@ -2271,16 +2271,19 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
int i, j, bucket = cb->args[0], skip = cb->args[1];
struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
struct pdp_ctx *pctx;
- struct gtp_net *gn;
-
- gn = net_generic(net, gtp_net_id);
if (cb->args[4])
return 0;
rcu_read_lock();
- list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
+ for_each_netdev_rcu(net, dev) {
+ if (dev->rtnl_link_ops != &gtp_link_ops)
+ continue;
+
+ gtp = netdev_priv(dev);
+
if (last_gtp && last_gtp != gtp)
continue;
else
@@ -2475,9 +2478,14 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
list_for_each_entry(net, net_list, exit_list) {
struct gtp_net *gn = net_generic(net, gtp_net_id);
- struct gtp_dev *gtp;
+ struct gtp_dev *gtp, *gtp_next;
+ struct net_device *dev;
+
+ for_each_netdev(net, dev)
+ if (dev->rtnl_link_ops == &gtp_link_ops)
+ gtp_dellink(dev, dev_to_kill);
- list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+ list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
gtp_dellink(gtp->dev, dev_to_kill);
}
}
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index e685a7f946f0..753215ebc67c 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -3072,7 +3072,11 @@ static int ca8210_probe(struct spi_device *spi_device)
spi_set_drvdata(priv->spi, priv);
if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) {
cascoda_api_upstream = ca8210_test_int_driver_write;
- ca8210_test_interface_init(priv);
+ ret = ca8210_test_interface_init(priv);
+ if (ret) {
+ dev_crit(&spi_device->dev, "ca8210_test_interface_init failed\n");
+ goto error;
+ }
} else {
cascoda_api_upstream = NULL;
}
diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
index 9adad59b8676..d247fe483c58 100644
--- a/drivers/net/mctp/mctp-i3c.c
+++ b/drivers/net/mctp/mctp-i3c.c
@@ -125,6 +125,8 @@ static int mctp_i3c_read(struct mctp_i3c_device *mi)
xfer.data.in = skb_put(skb, mi->mrl);
+ /* Ensure packets are passed to netif_rx() in the same order as the i3c reads. */
+ mutex_lock(&mi->lock);
rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1);
if (rc < 0)
goto err;
@@ -166,8 +168,10 @@ static int mctp_i3c_read(struct mctp_i3c_device *mi)
stats->rx_dropped++;
}
+ mutex_unlock(&mi->lock);
return 0;
err:
+ mutex_unlock(&mi->lock);
kfree_skb(skb);
return rc;
}
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 7246a910728d..3059435af596 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -684,7 +684,9 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs,
if (ret < 0)
return ret;
- mask = DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+ val = 0;
+ mask = DW_VR_MII_DIG_CTRL1_2G5_EN | DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+
if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
val = DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c
index 69434fd13f96..68d0d9e92a22 100644
--- a/drivers/net/pfcp.c
+++ b/drivers/net/pfcp.c
@@ -206,8 +206,8 @@ static int pfcp_newlink(struct net *net, struct net_device *dev,
goto exit_del_pfcp_sock;
}
- pn = net_generic(dev_net(dev), pfcp_net_id);
- list_add_rcu(&pfcp->list, &pn->pfcp_dev_list);
+ pn = net_generic(net, pfcp_net_id);
+ list_add(&pfcp->list, &pn->pfcp_dev_list);
netdev_dbg(dev, "registered new PFCP interface\n");
@@ -224,7 +224,7 @@ static void pfcp_dellink(struct net_device *dev, struct list_head *head)
{
struct pfcp_dev *pfcp = netdev_priv(dev);
- list_del_rcu(&pfcp->list);
+ list_del(&pfcp->list);
unregister_netdevice_queue(dev, head);
}
@@ -247,11 +247,16 @@ static int __net_init pfcp_net_init(struct net *net)
static void __net_exit pfcp_net_exit(struct net *net)
{
struct pfcp_net *pn = net_generic(net, pfcp_net_id);
- struct pfcp_dev *pfcp;
+ struct pfcp_dev *pfcp, *pfcp_next;
+ struct net_device *dev;
LIST_HEAD(list);
rtnl_lock();
- list_for_each_entry(pfcp, &pn->pfcp_dev_list, list)
+ for_each_netdev(net, dev)
+ if (dev->rtnl_link_ops == &pfcp_link_ops)
+ pfcp_dellink(dev, &list);
+
+ list_for_each_entry_safe(pfcp, pfcp_next, &pn->pfcp_dev_list, list)
pfcp_dellink(pfcp->dev, &list);
unregister_netdevice_many(&list);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index c1f1c810e810..8770004d9b08 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -340,6 +340,15 @@ static int of_bus_default_flags_match(struct device_node *np)
return of_property_present(np, "#address-cells") && (of_bus_n_addr_cells(np) == 3);
}
+static int of_bus_default_match(struct device_node *np)
+{
+ /*
+ * Check for presence first since of_bus_n_addr_cells() will warn when
+ * walking parent nodes.
+ */
+ return of_property_present(np, "#address-cells");
+}
+
/*
* Array of bus specific translators
*/
@@ -384,7 +393,7 @@ static const struct of_bus of_busses[] = {
{
.name = "default",
.addresses = "reg",
- .match = NULL,
+ .match = of_bus_default_match,
.count_cells = of_bus_default_count_cells,
.map = of_bus_default_map,
.translate = of_bus_default_translate,
@@ -399,7 +408,6 @@ static const struct of_bus *of_match_bus(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(of_busses); i++)
if (!of_busses[i].match || of_busses[i].match(np))
return &of_busses[i];
- BUG();
return NULL;
}
@@ -521,6 +529,8 @@ static u64 __of_translate_address(struct device_node *node,
if (parent == NULL)
return OF_BAD_ADDR;
bus = of_match_bus(parent);
+ if (!bus)
+ return OF_BAD_ADDR;
/* Count address cells & copy address locally */
bus->count_cells(dev, &na, &ns);
@@ -564,6 +574,8 @@ static u64 __of_translate_address(struct device_node *node,
/* Get new parent bus and counts */
pbus = of_match_bus(parent);
+ if (!pbus)
+ return OF_BAD_ADDR;
pbus->count_cells(dev, &pna, &pns);
if (!OF_CHECK_COUNTS(pna, pns)) {
pr_err("Bad cell count for %pOF\n", dev);
@@ -703,7 +715,7 @@ const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
/* match the parent's bus type */
bus = of_match_bus(parent);
- if (strcmp(bus->name, "pci") && (bar_no >= 0))
+ if (!bus || (strcmp(bus->name, "pci") && (bar_no >= 0)))
return NULL;
/* Get "reg" or "assigned-addresses" property */
diff --git a/drivers/of/unittest-data/tests-platform.dtsi b/drivers/of/unittest-data/tests-platform.dtsi
index fa39611071b3..cd310b26b50c 100644
--- a/drivers/of/unittest-data/tests-platform.dtsi
+++ b/drivers/of/unittest-data/tests-platform.dtsi
@@ -34,5 +34,18 @@
};
};
};
+
+ platform-tests-2 {
+ // No #address-cells or #size-cells
+ node {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ test-device@100 {
+ compatible = "test-sub-device";
+ reg = <0x100 1>;
+ };
+ };
+ };
};
};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 438fd70fa995..0fa0c0fd9a6a 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1380,6 +1380,7 @@ static void __init of_unittest_bus_3cell_ranges(void)
static void __init of_unittest_reg(void)
{
struct device_node *np;
+ struct resource res;
int ret;
u64 addr, size;
@@ -1396,6 +1397,19 @@ static void __init of_unittest_reg(void)
np, addr);
of_node_put(np);
+
+ np = of_find_node_by_path("/testcase-data/platform-tests-2/node/test-device@100");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ unittest(ret == -EINVAL, "of_address_to_resource(%pOF) expected error on untranslatable address\n",
+ np);
+
+ of_node_put(np);
+
}
struct of_unittest_expected_res {
diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c
index b59cacc740fa..0a5e7efbce2c 100644
--- a/drivers/pci/pcie/bwctrl.c
+++ b/drivers/pci/pcie/bwctrl.c
@@ -303,14 +303,17 @@ static int pcie_bwnotif_probe(struct pcie_device *srv)
if (ret)
return ret;
- ret = devm_request_irq(&srv->device, srv->irq, pcie_bwnotif_irq,
- IRQF_SHARED, "PCIe bwctrl", srv);
- if (ret)
- return ret;
-
scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
- port->link_bwctrl = no_free_ptr(data);
+ port->link_bwctrl = data;
+
+ ret = request_irq(srv->irq, pcie_bwnotif_irq,
+ IRQF_SHARED, "PCIe bwctrl", srv);
+ if (ret) {
+ port->link_bwctrl = NULL;
+ return ret;
+ }
+
pcie_bwnotif_enable(srv);
}
}
@@ -331,11 +334,15 @@ static void pcie_bwnotif_remove(struct pcie_device *srv)
pcie_cooling_device_unregister(data->cdev);
- pcie_bwnotif_disable(srv->port);
+ scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
+ scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
+ pcie_bwnotif_disable(srv->port);
+
+ free_irq(srv->irq, srv);
- scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem)
- scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem)
srv->port->link_bwctrl = NULL;
+ }
+ }
}
static int pcie_bwnotif_suspend(struct pcie_device *srv)
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 1aa303f76cc7..194c153e5d71 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -507,8 +507,7 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
{
u32 type = event->attr.type;
u64 config = event->attr.config;
- u64 raw_config_val;
- int ret;
+ int ret = -ENOENT;
/*
* Ensure we are finished checking standard hardware events for
@@ -528,21 +527,23 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
case PERF_TYPE_RAW:
/*
* As per SBI specification, the upper 16 bits must be unused
- * for a raw event.
+ * for a hardware raw event.
* Bits 63:62 are used to distinguish between raw events
* 00 - Hardware raw event
* 10 - SBI firmware events
* 11 - RISC-V platform specific firmware event
*/
- raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
+
switch (config >> 62) {
case 0:
- ret = RISCV_PMU_RAW_EVENT_IDX;
- *econfig = raw_config_val;
+ /* Return an error if any of bits [48-63] are set, as that is not allowed by the spec */
+ if (!(config & ~RISCV_PMU_RAW_EVENT_MASK)) {
+ *econfig = config & RISCV_PMU_RAW_EVENT_MASK;
+ ret = RISCV_PMU_RAW_EVENT_IDX;
+ }
break;
case 2:
- ret = (raw_config_val & 0xFFFF) |
- (SBI_PMU_EVENT_TYPE_FW << 16);
+ ret = (config & 0xFFFF) | (SBI_PMU_EVENT_TYPE_FW << 16);
break;
case 3:
/*
@@ -551,12 +552,13 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
* Event data - raw event encoding
*/
ret = SBI_PMU_EVENT_TYPE_FW << 16 | RISCV_PLAT_FW_EVENT;
- *econfig = raw_config_val;
+ *econfig = config & RISCV_PMU_PLAT_FW_EVENT_MASK;
+ break;
+ default:
break;
}
break;
default:
- ret = -ENOENT;
break;
}
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index 26b878ee5191..a254debb9256 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -947,6 +947,10 @@ static int amd_pmc_suspend_handler(struct device *dev)
{
struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ /*
+ * Must be called only from the same set of dev_pm_ops handlers
+ * as i8042_pm_suspend() is called: currently just from .suspend.
+ */
if (pdev->disable_8042_wakeup && !disable_workarounds) {
int rc = amd_pmc_wa_irq1(pdev);
@@ -959,7 +963,9 @@ static int amd_pmc_suspend_handler(struct device *dev)
return 0;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
+static const struct dev_pm_ops amd_pmc_pm = {
+ .suspend = amd_pmc_suspend_handler,
+};
static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
diff --git a/drivers/platform/x86/dell/dell-uart-backlight.c b/drivers/platform/x86/dell/dell-uart-backlight.c
index 6e5dc7e3674f..bcc5c0f3bb4d 100644
--- a/drivers/platform/x86/dell/dell-uart-backlight.c
+++ b/drivers/platform/x86/dell/dell-uart-backlight.c
@@ -283,6 +283,9 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
init_waitqueue_head(&dell_bl->wait_queue);
dell_bl->dev = dev;
+ serdev_device_set_drvdata(serdev, dell_bl);
+ serdev_device_set_client_ops(serdev, &dell_uart_bl_serdev_ops);
+
ret = devm_serdev_device_open(dev, serdev);
if (ret)
return dev_err_probe(dev, ret, "opening UART device\n");
@@ -290,8 +293,6 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
/* 9600 bps, no flow control, these are the default but set them to be sure */
serdev_device_set_baudrate(serdev, 9600);
serdev_device_set_flow_control(serdev, false);
- serdev_device_set_drvdata(serdev, dell_bl);
- serdev_device_set_client_ops(serdev, &dell_uart_bl_serdev_ops);
get_version[0] = DELL_SOF(GET_CMD_LEN);
get_version[1] = CMD_GET_VERSION;
diff --git a/drivers/platform/x86/intel/pmc/core_ssram.c b/drivers/platform/x86/intel/pmc/core_ssram.c
index 50ebfd586d3f..739569803017 100644
--- a/drivers/platform/x86/intel/pmc/core_ssram.c
+++ b/drivers/platform/x86/intel/pmc/core_ssram.c
@@ -269,8 +269,12 @@ pmc_core_ssram_get_pmc(struct pmc_dev *pmcdev, int pmc_idx, u32 offset)
/*
* The secondary PMC BARS (which are behind hidden PCI devices)
* are read from fixed offsets in MMIO of the primary PMC BAR.
+ * If a device is not present, the value will be 0.
*/
ssram_base = get_base(tmp_ssram, offset);
+ if (!ssram_base)
+ return 0;
+
ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
if (!ssram)
return -ENOMEM;
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 1e46e30dae96..dbcd3087aaa4 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -804,6 +804,7 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
static const struct x86_cpu_id isst_cpu_ids[] = {
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, SST_HPM_SUPPORTED),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, SST_HPM_SUPPORTED),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, 0),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, SST_HPM_SUPPORTED),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, SST_HPM_SUPPORTED),
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c
index 603e7ded06a9..2f01cd22a6ee 100644
--- a/drivers/platform/x86/intel/tpmi_power_domains.c
+++ b/drivers/platform/x86/intel/tpmi_power_domains.c
@@ -81,6 +81,7 @@ static const struct x86_cpu_id tpmi_cpu_ids[] = {
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, NULL),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, NULL),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, NULL),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, NULL),
X86_MATCH_VFM(INTEL_PANTHERCOVE_X, NULL),
{}
diff --git a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
index d2699ca24f34..a96b215cd2c5 100644
--- a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
+++ b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
@@ -199,14 +199,15 @@ static int yt2_1380_fc_serdev_probe(struct serdev_device *serdev)
if (ret)
return ret;
+ serdev_device_set_drvdata(serdev, fc);
+ serdev_device_set_client_ops(serdev, &yt2_1380_fc_serdev_ops);
+
ret = devm_serdev_device_open(dev, serdev);
if (ret)
return dev_err_probe(dev, ret, "opening UART device\n");
serdev_device_set_baudrate(serdev, 600);
serdev_device_set_flow_control(serdev, false);
- serdev_device_set_drvdata(serdev, fc);
- serdev_device_set_client_ops(serdev, &yt2_1380_fc_serdev_ops);
ret = devm_extcon_register_notifier_all(dev, fc->extcon, &fc->nb);
if (ret)
diff --git a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
index e3a0f64c144c..3668fe66b22c 100644
--- a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
@@ -770,7 +770,7 @@ static void imx8mp_blk_ctrl_remove(struct platform_device *pdev)
of_genpd_del_provider(pdev->dev.of_node);
- for (i = 0; bc->onecell_data.num_domains; i++) {
+ for (i = 0; i < bc->onecell_data.num_domains; i++) {
struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
pm_genpd_remove(&domain->genpd);
diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
index 12d0535a874b..8a7f167e405e 100644
--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
@@ -176,6 +176,7 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
vdev->dev.parent = dev;
priv->vdev = vdev;
+ device_set_of_node_from_dev(&vdev->dev, dev);
error = platform_device_add(vdev);
if (error)
goto err_device_put;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index adee6f60c966..0cc6a0f77b09 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -210,6 +210,9 @@ static int scsi_check_passthrough(struct scsi_cmnd *scmd,
struct scsi_sense_hdr sshdr;
enum sam_status status;
+ if (!scmd->result)
+ return 0;
+
if (!failures)
return 0;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fde7de3b1e55..9b47f91c5b97 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -4104,7 +4104,7 @@ iscsi_if_rx(struct sk_buff *skb)
}
do {
/*
- * special case for GET_STATS:
+ * special case for GET_STATS, GET_CHAP and GET_HOST_STATS:
* on success - sending reply and stats from
* inside of if_recv_msg(),
* on error - fall through.
@@ -4113,6 +4113,8 @@ iscsi_if_rx(struct sk_buff *skb)
break;
if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
break;
+ if (ev->type == ISCSI_UEVENT_GET_HOST_STATS && !err)
+ break;
err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
ev, sizeof(*ev));
if (err == -EAGAIN && --retries < 0) {
diff --git a/drivers/staging/gpib/Kconfig b/drivers/staging/gpib/Kconfig
index 259f3ff33646..81510db3072e 100644
--- a/drivers/staging/gpib/Kconfig
+++ b/drivers/staging/gpib/Kconfig
@@ -65,6 +65,8 @@ config GPIB_NI_PCI_ISA
depends on ISA_BUS || PCI || PCMCIA
depends on HAS_IOPORT
depends on !X86_PAE
+ depends on PCMCIA || !PCMCIA
+ depends on HAS_IOPORT_MAP
select GPIB_COMMON
select GPIB_NEC7210
help
@@ -89,6 +91,7 @@ config GPIB_CB7210
depends on HAS_IOPORT
depends on ISA_BUS || PCI || PCMCIA
depends on !X86_PAE
+ depends on PCMCIA || !PCMCIA
select GPIB_COMMON
select GPIB_NEC7210
help
@@ -177,6 +180,7 @@ config GPIB_HP82341
config GPIB_INES
tristate "INES"
depends on PCI || ISA_BUS || PCMCIA
+ depends on PCMCIA || !PCMCIA
depends on HAS_IOPORT
depends on !X86_PAE
select GPIB_COMMON
@@ -199,8 +203,8 @@ config GPIB_INES
called cb7210.
config GPIB_PCMCIA
- bool "PCMCIA/Cardbus support for NI MC and Ines boards"
- depends on PCCARD && (GPIB_NI_PCI_ISA || GPIB_CB7210 || GPIB_INES)
+ def_bool y
+ depends on PCMCIA && (GPIB_NI_PCI_ISA || GPIB_CB7210 || GPIB_INES)
help
Enable PCMCIA/Cardbus support for National Instruments,
Measurement Computing boards and Ines boards.
diff --git a/drivers/staging/gpib/agilent_82350b/Makefile b/drivers/staging/gpib/agilent_82350b/Makefile
index d9236c92e04b..f24e1e713a63 100644
--- a/drivers/staging/gpib/agilent_82350b/Makefile
+++ b/drivers/staging/gpib/agilent_82350b/Makefile
@@ -1,2 +1,2 @@
-obj-m += agilent_82350b.o
+obj-$(CONFIG_GPIB_AGILENT_82350B) += agilent_82350b.o
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
index 53006d0cc79c..8e2334fe5c9b 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
@@ -700,7 +700,7 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
GPIB_82350A_REGION));
dev_dbg(board->gpib_dev, "%s: gpib base address remapped to 0x%p\n",
driver_name, a_priv->gpib_base);
- tms_priv->iobase = a_priv->gpib_base + TMS9914_BASE_REG;
+ tms_priv->mmiobase = a_priv->gpib_base + TMS9914_BASE_REG;
a_priv->sram_base = ioremap(pci_resource_start(a_priv->pci_device,
SRAM_82350A_REGION),
pci_resource_len(a_priv->pci_device,
@@ -724,7 +724,7 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
pci_resource_len(a_priv->pci_device, GPIB_REGION));
dev_dbg(board->gpib_dev, "%s: gpib base address remapped to 0x%p\n",
driver_name, a_priv->gpib_base);
- tms_priv->iobase = a_priv->gpib_base + TMS9914_BASE_REG;
+ tms_priv->mmiobase = a_priv->gpib_base + TMS9914_BASE_REG;
a_priv->sram_base = ioremap(pci_resource_start(a_priv->pci_device, SRAM_REGION),
pci_resource_len(a_priv->pci_device, SRAM_REGION));
dev_dbg(board->gpib_dev, "%s: sram base address remapped to 0x%p\n",
diff --git a/drivers/staging/gpib/agilent_82357a/Makefile b/drivers/staging/gpib/agilent_82357a/Makefile
index 4a1d940fce2b..81a55c257a6e 100644
--- a/drivers/staging/gpib/agilent_82357a/Makefile
+++ b/drivers/staging/gpib/agilent_82357a/Makefile
@@ -1,4 +1,4 @@
-obj-m += agilent_82357a.o
+obj-$(CONFIG_GPIB_AGILENT_82357A) += agilent_82357a.o
diff --git a/drivers/staging/gpib/cb7210/Makefile b/drivers/staging/gpib/cb7210/Makefile
index 22e0214fc17d..cda0725d6487 100644
--- a/drivers/staging/gpib/cb7210/Makefile
+++ b/drivers/staging/gpib/cb7210/Makefile
@@ -1,4 +1,4 @@
ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
-obj-m += cb7210.o
+obj-$(CONFIG_GPIB_CB7210) += cb7210.o
diff --git a/drivers/staging/gpib/cb7210/cb7210.c b/drivers/staging/gpib/cb7210/cb7210.c
index 63df7f3eb3f3..59e41c97f518 100644
--- a/drivers/staging/gpib/cb7210/cb7210.c
+++ b/drivers/staging/gpib/cb7210/cb7210.c
@@ -971,12 +971,12 @@ int cb_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
switch (cb_priv->pci_chip) {
case PCI_CHIP_AMCC_S5933:
cb_priv->amcc_iobase = pci_resource_start(cb_priv->pci_device, 0);
- nec_priv->iobase = (void *)(pci_resource_start(cb_priv->pci_device, 1));
+ nec_priv->iobase = pci_resource_start(cb_priv->pci_device, 1);
cb_priv->fifo_iobase = pci_resource_start(cb_priv->pci_device, 2);
break;
case PCI_CHIP_QUANCOM:
- nec_priv->iobase = (void *)(pci_resource_start(cb_priv->pci_device, 0));
- cb_priv->fifo_iobase = (unsigned long)nec_priv->iobase;
+ nec_priv->iobase = pci_resource_start(cb_priv->pci_device, 0);
+ cb_priv->fifo_iobase = nec_priv->iobase;
break;
default:
pr_err("cb7210: bug! unhandled pci_chip=%i\n", cb_priv->pci_chip);
@@ -1040,8 +1040,8 @@ int cb_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
return retval;
cb_priv = board->private_data;
nec_priv = &cb_priv->nec7210_priv;
- if (request_region((unsigned long)config->ibbase, cb7210_iosize, "cb7210") == 0) {
- pr_err("gpib: ioports starting at 0x%p are already in use\n", config->ibbase);
+ if (request_region(config->ibbase, cb7210_iosize, "cb7210") == 0) {
+ pr_err("gpib: ioports starting at 0x%u are already in use\n", config->ibbase);
return -EIO;
}
nec_priv->iobase = config->ibbase;
@@ -1471,7 +1471,7 @@ int cb_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
(unsigned long)curr_dev->resource[0]->start);
return -EIO;
}
- nec_priv->iobase = (void *)(unsigned long)curr_dev->resource[0]->start;
+ nec_priv->iobase = curr_dev->resource[0]->start;
cb_priv->fifo_iobase = curr_dev->resource[0]->start;
if (request_irq(curr_dev->irq, cb7210_interrupt, IRQF_SHARED,
diff --git a/drivers/staging/gpib/cb7210/cb7210.h b/drivers/staging/gpib/cb7210/cb7210.h
index 4ad976de2b68..c17cb22585f7 100644
--- a/drivers/staging/gpib/cb7210/cb7210.h
+++ b/drivers/staging/gpib/cb7210/cb7210.h
@@ -113,9 +113,9 @@ enum hs_regs {
HS_STATUS = 0x8, /* HS_STATUS register */
};
-static inline unsigned long nec7210_iobase(const struct cb7210_priv *cb_priv)
+static inline u32 nec7210_iobase(const struct cb7210_priv *cb_priv)
{
- return (unsigned long)(cb_priv->nec7210_priv.iobase);
+ return cb_priv->nec7210_priv.iobase;
}
static inline int cb7210_page_in_bits(unsigned int page)
diff --git a/drivers/staging/gpib/cec/Makefile b/drivers/staging/gpib/cec/Makefile
index f4638628ff29..b7141e23d4e0 100644
--- a/drivers/staging/gpib/cec/Makefile
+++ b/drivers/staging/gpib/cec/Makefile
@@ -1,3 +1,3 @@
-obj-m += cec_gpib.o
+obj-$(CONFIG_GPIB_CEC_PCI) += cec_gpib.o
diff --git a/drivers/staging/gpib/cec/cec_gpib.c b/drivers/staging/gpib/cec/cec_gpib.c
index 3dc933deb401..9c00a874468c 100644
--- a/drivers/staging/gpib/cec/cec_gpib.c
+++ b/drivers/staging/gpib/cec/cec_gpib.c
@@ -297,8 +297,8 @@ int cec_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
cec_priv->plx_iobase = pci_resource_start(cec_priv->pci_device, 1);
pr_info(" plx9050 base address 0x%lx\n", cec_priv->plx_iobase);
- nec_priv->iobase = (void *)(pci_resource_start(cec_priv->pci_device, 3));
- pr_info(" nec7210 base address 0x%p\n", nec_priv->iobase);
+ nec_priv->iobase = pci_resource_start(cec_priv->pci_device, 3);
+ pr_info(" nec7210 base address 0x%x\n", nec_priv->iobase);
isr_flags |= IRQF_SHARED;
if (request_irq(cec_priv->pci_device->irq, cec_interrupt, isr_flags, "pci-gpib", board)) {
diff --git a/drivers/staging/gpib/common/gpib_os.c b/drivers/staging/gpib/common/gpib_os.c
index 405237d8cb47..0962729d7dfe 100644
--- a/drivers/staging/gpib/common/gpib_os.c
+++ b/drivers/staging/gpib/common/gpib_os.c
@@ -116,56 +116,6 @@ int io_timed_out(gpib_board_t *board)
return 0;
}
-void writeb_wrapper(unsigned int value, void *address)
-{
- writeb(value, address);
-};
-EXPORT_SYMBOL(writeb_wrapper);
-
-void writew_wrapper(unsigned int value, void *address)
-{
- writew(value, address);
-};
-EXPORT_SYMBOL(writew_wrapper);
-
-unsigned int readb_wrapper(void *address)
-{
- return readb(address);
-};
-EXPORT_SYMBOL(readb_wrapper);
-
-unsigned int readw_wrapper(void *address)
-{
- return readw(address);
-};
-EXPORT_SYMBOL(readw_wrapper);
-
-#ifdef CONFIG_HAS_IOPORT
-void outb_wrapper(unsigned int value, void *address)
-{
- outb(value, (unsigned long)(address));
-};
-EXPORT_SYMBOL(outb_wrapper);
-
-void outw_wrapper(unsigned int value, void *address)
-{
- outw(value, (unsigned long)(address));
-};
-EXPORT_SYMBOL(outw_wrapper);
-
-unsigned int inb_wrapper(void *address)
-{
- return inb((unsigned long)(address));
-};
-EXPORT_SYMBOL(inb_wrapper);
-
-unsigned int inw_wrapper(void *address)
-{
- return inw((unsigned long)(address));
-};
-EXPORT_SYMBOL(inw_wrapper);
-#endif
-
/* this is a function instead of a constant because of Suse
* defining HZ to be a function call to get_hz()
*/
@@ -536,7 +486,7 @@ int dvrsp(gpib_board_t *board, unsigned int pad, int sad,
return -1;
}
- if (pad > MAX_GPIB_PRIMARY_ADDRESS || sad > MAX_GPIB_SECONDARY_ADDRESS) {
+ if (pad > MAX_GPIB_PRIMARY_ADDRESS || sad > MAX_GPIB_SECONDARY_ADDRESS || sad < -1) {
pr_err("gpib: bad address for serial poll");
return -1;
}
@@ -1623,7 +1573,7 @@ static int iobase_ioctl(gpib_board_config_t *config, unsigned long arg)
if (WARN_ON_ONCE(sizeof(void *) > sizeof(base_addr)))
return -EFAULT;
- config->ibbase = (void *)(unsigned long)(base_addr);
+ config->ibbase = base_addr;
return 0;
}
diff --git a/drivers/staging/gpib/eastwood/Makefile b/drivers/staging/gpib/eastwood/Makefile
index c74056f959d0..384825195f77 100644
--- a/drivers/staging/gpib/eastwood/Makefile
+++ b/drivers/staging/gpib/eastwood/Makefile
@@ -1,3 +1,3 @@
-obj-m += fluke_gpib.o
+obj-$(CONFIG_GPIB_FLUKE) += fluke_gpib.o
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.c b/drivers/staging/gpib/eastwood/fluke_gpib.c
index 3f938ab0c84d..5e59d38beb35 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.c
+++ b/drivers/staging/gpib/eastwood/fluke_gpib.c
@@ -1011,12 +1011,12 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
}
e_priv->gpib_iomem_res = res;
- nec_priv->iobase = ioremap(e_priv->gpib_iomem_res->start,
+ nec_priv->mmiobase = ioremap(e_priv->gpib_iomem_res->start,
resource_size(e_priv->gpib_iomem_res));
- pr_info("gpib: iobase %lx remapped to %p, length=%d\n",
- (unsigned long)e_priv->gpib_iomem_res->start,
- nec_priv->iobase, (int)resource_size(e_priv->gpib_iomem_res));
- if (!nec_priv->iobase) {
+ pr_info("gpib: mmiobase %llx remapped to %p, length=%d\n",
+ (u64)e_priv->gpib_iomem_res->start,
+ nec_priv->mmiobase, (int)resource_size(e_priv->gpib_iomem_res));
+ if (!nec_priv->mmiobase) {
dev_err(&fluke_gpib_pdev->dev, "Could not map I/O memory\n");
return -ENOMEM;
}
@@ -1107,7 +1107,7 @@ void fluke_detach(gpib_board_t *board)
gpib_free_pseudo_irq(board);
nec_priv = &e_priv->nec7210_priv;
- if (nec_priv->iobase) {
+ if (nec_priv->mmiobase) {
fluke_paged_write_byte(e_priv, 0, ISR0_IMR0, ISR0_IMR0_PAGE);
nec7210_board_reset(nec_priv, board);
}
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.h b/drivers/staging/gpib/eastwood/fluke_gpib.h
index fcbd42f8f9af..4e2144d45270 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.h
+++ b/drivers/staging/gpib/eastwood/fluke_gpib.h
@@ -72,7 +72,7 @@ static inline uint8_t fluke_read_byte_nolock(struct nec7210_priv *nec_priv,
{
u8 retval;
- retval = readl(nec_priv->iobase + register_num * nec_priv->offset);
+ retval = readl(nec_priv->mmiobase + register_num * nec_priv->offset);
return retval;
}
@@ -80,7 +80,7 @@ static inline uint8_t fluke_read_byte_nolock(struct nec7210_priv *nec_priv,
static inline void fluke_write_byte_nolock(struct nec7210_priv *nec_priv, uint8_t data,
int register_num)
{
- writel(data, nec_priv->iobase + register_num * nec_priv->offset);
+ writel(data, nec_priv->mmiobase + register_num * nec_priv->offset);
}
static inline uint8_t fluke_paged_read_byte(struct fluke_priv *e_priv,
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
index 62791db1c34a..0662b20a45e7 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
@@ -24,6 +24,8 @@
#include <linux/slab.h>
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("GPIB Driver for fmh_gpib_core");
+MODULE_AUTHOR("Frank Mori Hess <fmh6jj@gmail.com>");
static irqreturn_t fmh_gpib_interrupt(int irq, void *arg);
static int fmh_gpib_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config);
@@ -1419,15 +1421,14 @@ static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *
}
e_priv->gpib_iomem_res = res;
- nec_priv->iobase = ioremap(e_priv->gpib_iomem_res->start,
+ nec_priv->mmiobase = ioremap(e_priv->gpib_iomem_res->start,
resource_size(e_priv->gpib_iomem_res));
- if (!nec_priv->iobase) {
+ if (!nec_priv->mmiobase) {
dev_err(board->dev, "Could not map I/O memory for gpib\n");
return -ENOMEM;
}
- dev_info(board->dev, "iobase 0x%lx remapped to %p, length=%ld\n",
- (unsigned long)e_priv->gpib_iomem_res->start,
- nec_priv->iobase, (unsigned long)resource_size(e_priv->gpib_iomem_res));
+ dev_info(board->dev, "iobase %pr remapped to %p\n",
+ e_priv->gpib_iomem_res, nec_priv->mmiobase);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma_fifos");
if (!res) {
@@ -1507,14 +1508,14 @@ void fmh_gpib_detach(gpib_board_t *board)
free_irq(e_priv->irq, board);
if (e_priv->fifo_base)
fifos_write(e_priv, 0, FIFO_CONTROL_STATUS_REG);
- if (nec_priv->iobase) {
+ if (nec_priv->mmiobase) {
write_byte(nec_priv, 0, ISR0_IMR0_REG);
nec7210_board_reset(nec_priv, board);
}
if (e_priv->fifo_base)
iounmap(e_priv->fifo_base);
- if (nec_priv->iobase)
- iounmap(nec_priv->iobase);
+ if (nec_priv->mmiobase)
+ iounmap(nec_priv->mmiobase);
if (e_priv->dma_port_res) {
release_mem_region(e_priv->dma_port_res->start,
resource_size(e_priv->dma_port_res));
@@ -1564,12 +1565,12 @@ static int fmh_gpib_pci_attach_impl(gpib_board_t *board, const gpib_board_config
e_priv->gpib_iomem_res = &pci_device->resource[gpib_control_status_pci_resource_index];
e_priv->dma_port_res = &pci_device->resource[gpib_fifo_pci_resource_index];
- nec_priv->iobase = ioremap(pci_resource_start(pci_device,
+ nec_priv->mmiobase = ioremap(pci_resource_start(pci_device,
gpib_control_status_pci_resource_index),
pci_resource_len(pci_device,
gpib_control_status_pci_resource_index));
dev_info(board->dev, "base address for gpib control/status registers remapped to 0x%p\n",
- nec_priv->iobase);
+ nec_priv->mmiobase);
if (e_priv->dma_port_res->flags & IORESOURCE_MEM) {
e_priv->fifo_base = ioremap(pci_resource_start(pci_device,
@@ -1632,14 +1633,14 @@ void fmh_gpib_pci_detach(gpib_board_t *board)
free_irq(e_priv->irq, board);
if (e_priv->fifo_base)
fifos_write(e_priv, 0, FIFO_CONTROL_STATUS_REG);
- if (nec_priv->iobase) {
+ if (nec_priv->mmiobase) {
write_byte(nec_priv, 0, ISR0_IMR0_REG);
nec7210_board_reset(nec_priv, board);
}
if (e_priv->fifo_base)
iounmap(e_priv->fifo_base);
- if (nec_priv->iobase)
- iounmap(nec_priv->iobase);
+ if (nec_priv->mmiobase)
+ iounmap(nec_priv->mmiobase);
if (e_priv->dma_port_res || e_priv->gpib_iomem_res)
pci_release_regions(to_pci_dev(board->dev));
if (board->dev)
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.h b/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
index 43bfc89d2a6f..60b1bd6d3c15 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
@@ -127,13 +127,13 @@ static const unsigned int fifo_max_burst_length_mask = 0x00ff;
static inline uint8_t gpib_cs_read_byte(struct nec7210_priv *nec_priv,
unsigned int register_num)
{
- return readb(nec_priv->iobase + register_num * nec_priv->offset);
+ return readb(nec_priv->mmiobase + register_num * nec_priv->offset);
}
static inline void gpib_cs_write_byte(struct nec7210_priv *nec_priv, uint8_t data,
unsigned int register_num)
{
- writeb(data, nec_priv->iobase + register_num * nec_priv->offset);
+ writeb(data, nec_priv->mmiobase + register_num * nec_priv->offset);
}
static inline uint16_t fifos_read(struct fmh_priv *fmh_priv, int register_num)
diff --git a/drivers/staging/gpib/gpio/Makefile b/drivers/staging/gpib/gpio/Makefile
index a31ded6e5924..00ea52abdda7 100644
--- a/drivers/staging/gpib/gpio/Makefile
+++ b/drivers/staging/gpib/gpio/Makefile
@@ -1,4 +1,4 @@
-obj-m += gpib_bitbang.o
+obj-$(CONFIG_GPIB_GPIO) += gpib_bitbang.o
diff --git a/drivers/staging/gpib/gpio/gpib_bitbang.c b/drivers/staging/gpib/gpio/gpib_bitbang.c
index a2d562cbd65b..23550502e012 100644
--- a/drivers/staging/gpib/gpio/gpib_bitbang.c
+++ b/drivers/staging/gpib/gpio/gpib_bitbang.c
@@ -315,7 +315,7 @@ struct bb_priv {
enum listener_function_state listener_state;
};
-inline long usec_diff(struct timespec64 *a, struct timespec64 *b);
+static inline long usec_diff(struct timespec64 *a, struct timespec64 *b);
static void bb_buffer_print(unsigned char *buffer, size_t length, int cmd, int eoi);
static void set_data_lines(u8 byte);
static u8 get_data_lines(void);
diff --git a/drivers/staging/gpib/hp_82335/Makefile b/drivers/staging/gpib/hp_82335/Makefile
index 8b7a552e9355..305ce44ee48a 100644
--- a/drivers/staging/gpib/hp_82335/Makefile
+++ b/drivers/staging/gpib/hp_82335/Makefile
@@ -1,4 +1,4 @@
-obj-m += hp82335.o
+obj-$(CONFIG_GPIB_HP82335) += hp82335.o
diff --git a/drivers/staging/gpib/hp_82335/hp82335.c b/drivers/staging/gpib/hp_82335/hp82335.c
index 40afe42aea47..ea78143c7ab6 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.c
+++ b/drivers/staging/gpib/hp_82335/hp82335.c
@@ -9,6 +9,7 @@
*/
#include "hp82335.h"
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/module.h>
@@ -233,7 +234,7 @@ static void hp82335_clear_interrupt(struct hp82335_priv *hp_priv)
{
struct tms9914_priv *tms_priv = &hp_priv->tms9914_priv;
- writeb(0, tms_priv->iobase + HPREG_INTR_CLEAR);
+ writeb(0, tms_priv->mmiobase + HPREG_INTR_CLEAR);
}
int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
@@ -241,7 +242,7 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
struct hp82335_priv *hp_priv;
struct tms9914_priv *tms_priv;
int retval;
- const unsigned long upper_iomem_base = (unsigned long)config->ibbase + hp82335_rom_size;
+ const unsigned long upper_iomem_base = config->ibbase + hp82335_rom_size;
board->status = 0;
@@ -253,7 +254,7 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
tms_priv->write_byte = hp82335_write_byte;
tms_priv->offset = 1;
- switch ((unsigned long)(config->ibbase)) {
+ switch (config->ibbase) {
case 0xc4000:
case 0xc8000:
case 0xcc000:
@@ -271,7 +272,7 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
case 0xfc000:
break;
default:
- pr_err("hp82335: invalid base io address 0x%p\n", config->ibbase);
+ pr_err("hp82335: invalid base io address 0x%u\n", config->ibbase);
return -EINVAL;
}
if (!request_mem_region(upper_iomem_base, hp82335_upper_iomem_size, "hp82335")) {
@@ -280,9 +281,9 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
return -EBUSY;
}
hp_priv->raw_iobase = upper_iomem_base;
- tms_priv->iobase = ioremap(upper_iomem_base, hp82335_upper_iomem_size);
+ tms_priv->mmiobase = ioremap(upper_iomem_base, hp82335_upper_iomem_size);
pr_info("hp82335: upper half of 82335 iomem region 0x%lx remapped to 0x%p\n",
- hp_priv->raw_iobase, tms_priv->iobase);
+ hp_priv->raw_iobase, tms_priv->mmiobase);
retval = request_irq(config->ibirq, hp82335_interrupt, 0, "hp82335", board);
if (retval) {
@@ -296,7 +297,7 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
hp82335_clear_interrupt(hp_priv);
- writeb(INTR_ENABLE, tms_priv->iobase + HPREG_CCR);
+ writeb(INTR_ENABLE, tms_priv->mmiobase + HPREG_CCR);
tms9914_online(board, tms_priv);
@@ -312,10 +313,10 @@ void hp82335_detach(gpib_board_t *board)
tms_priv = &hp_priv->tms9914_priv;
if (hp_priv->irq)
free_irq(hp_priv->irq, board);
- if (tms_priv->iobase) {
- writeb(0, tms_priv->iobase + HPREG_CCR);
+ if (tms_priv->mmiobase) {
+ writeb(0, tms_priv->mmiobase + HPREG_CCR);
tms9914_board_reset(tms_priv);
- iounmap((void *)tms_priv->iobase);
+ iounmap(tms_priv->mmiobase);
}
if (hp_priv->raw_iobase)
release_mem_region(hp_priv->raw_iobase, hp82335_upper_iomem_size);
diff --git a/drivers/staging/gpib/hp_82341/Makefile b/drivers/staging/gpib/hp_82341/Makefile
index 1fe7db4f8ca4..21367310a17e 100644
--- a/drivers/staging/gpib/hp_82341/Makefile
+++ b/drivers/staging/gpib/hp_82341/Makefile
@@ -1,2 +1,2 @@
-obj-m += hp_82341.o
+obj-$(CONFIG_GPIB_HP82341) += hp_82341.o
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.c b/drivers/staging/gpib/hp_82341/hp_82341.c
index 8ad1c885a9fb..71d481e88bd9 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.c
+++ b/drivers/staging/gpib/hp_82341/hp_82341.c
@@ -473,12 +473,12 @@ void hp_82341_free_private(gpib_board_t *board)
static uint8_t hp_82341_read_byte(struct tms9914_priv *priv, unsigned int register_num)
{
- return inb((unsigned long)(priv->iobase) + register_num);
+ return inb(priv->iobase + register_num);
}
static void hp_82341_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num)
{
- outb(data, (unsigned long)(priv->iobase) + register_num);
+ outb(data, priv->iobase + register_num);
}
static int hp_82341_find_isapnp_board(struct pnp_dev **dev)
@@ -682,8 +682,8 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
{
struct hp_82341_priv *hp_priv;
struct tms9914_priv *tms_priv;
- unsigned long start_addr;
- void *iobase;
+ u32 start_addr;
+ u32 iobase;
int irq;
int i;
int retval;
@@ -704,7 +704,7 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
if (retval < 0)
return retval;
hp_priv->pnp_dev = dev;
- iobase = (void *)(pnp_port_start(dev, 0));
+ iobase = pnp_port_start(dev, 0);
irq = pnp_irq(dev, 0);
hp_priv->hw_version = HW_VERSION_82341D;
hp_priv->io_region_offset = 0x8;
@@ -714,9 +714,9 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
hp_priv->hw_version = HW_VERSION_82341C;
hp_priv->io_region_offset = 0x400;
}
- pr_info("hp_82341: base io 0x%p\n", iobase);
+ pr_info("hp_82341: base io 0x%u\n", iobase);
for (i = 0; i < hp_82341_num_io_regions; ++i) {
- start_addr = (unsigned long)(iobase) + i * hp_priv->io_region_offset;
+ start_addr = iobase + i * hp_priv->io_region_offset;
if (!request_region(start_addr, hp_82341_region_iosize, "hp_82341")) {
pr_err("hp_82341: failed to allocate io ports 0x%lx-0x%lx\n",
start_addr,
@@ -725,7 +725,7 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
}
hp_priv->iobase[i] = start_addr;
}
- tms_priv->iobase = (void *)(hp_priv->iobase[2]);
+ tms_priv->iobase = hp_priv->iobase[2];
if (hp_priv->hw_version == HW_VERSION_82341D) {
retval = isapnp_cfg_begin(hp_priv->pnp_dev->card->number,
hp_priv->pnp_dev->number);
diff --git a/drivers/staging/gpib/include/gpibP.h b/drivers/staging/gpib/include/gpibP.h
index 5fc42b645ab7..b97da577ba33 100644
--- a/drivers/staging/gpib/include/gpibP.h
+++ b/drivers/staging/gpib/include/gpibP.h
@@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
void gpib_register_driver(gpib_interface_t *interface, struct module *mod);
void gpib_unregister_driver(gpib_interface_t *interface);
@@ -35,16 +36,5 @@ extern gpib_board_t board_array[GPIB_MAX_NUM_BOARDS];
extern struct list_head registered_drivers;
-#include <linux/io.h>
-
-void writeb_wrapper(unsigned int value, void *address);
-unsigned int readb_wrapper(void *address);
-void outb_wrapper(unsigned int value, void *address);
-unsigned int inb_wrapper(void *address);
-void writew_wrapper(unsigned int value, void *address);
-unsigned int readw_wrapper(void *address);
-void outw_wrapper(unsigned int value, void *address);
-unsigned int inw_wrapper(void *address);
-
#endif // _GPIB_P_H
diff --git a/drivers/staging/gpib/include/gpib_types.h b/drivers/staging/gpib/include/gpib_types.h
index ee2643da6d71..b41781a55a60 100644
--- a/drivers/staging/gpib/include/gpib_types.h
+++ b/drivers/staging/gpib/include/gpib_types.h
@@ -31,7 +31,8 @@ typedef struct {
void *init_data;
int init_data_length;
/* IO base address to use for non-pnp cards (set by core, driver should make local copy) */
- void *ibbase;
+ u32 ibbase;
+ void __iomem *mmibbase;
/* IRQ to use for non-pnp cards (set by core, driver should make local copy) */
unsigned int ibirq;
/* dma channel to use for non-pnp cards (set by core, driver should make local copy) */
diff --git a/drivers/staging/gpib/include/nec7210.h b/drivers/staging/gpib/include/nec7210.h
index c00aba4ce846..ca998c4a84bf 100644
--- a/drivers/staging/gpib/include/nec7210.h
+++ b/drivers/staging/gpib/include/nec7210.h
@@ -18,7 +18,10 @@
/* struct used to provide variables local to a nec7210 chip */
struct nec7210_priv {
- void *iobase;
+#ifdef CONFIG_HAS_IOPORT
+ u32 iobase;
+#endif
+ void __iomem *mmiobase;
unsigned int offset; // offset between successive nec7210 io addresses
unsigned int dma_channel;
u8 *dma_buffer;
diff --git a/drivers/staging/gpib/include/tms9914.h b/drivers/staging/gpib/include/tms9914.h
index 456b488212d2..d8c8d1c9b131 100644
--- a/drivers/staging/gpib/include/tms9914.h
+++ b/drivers/staging/gpib/include/tms9914.h
@@ -20,7 +20,10 @@ enum tms9914_holdoff_mode {
/* struct used to provide variables local to a tms9914 chip */
struct tms9914_priv {
- void *iobase;
+#ifdef CONFIG_HAS_IOPORT
+ u32 iobase;
+#endif
+ void __iomem *mmiobase;
unsigned int offset; // offset between successive tms9914 io addresses
unsigned int dma_channel;
// software copy of bits written to interrupt mask registers
diff --git a/drivers/staging/gpib/ines/Makefile b/drivers/staging/gpib/ines/Makefile
index cdcaa59a4e39..6b6e480fd811 100644
--- a/drivers/staging/gpib/ines/Makefile
+++ b/drivers/staging/gpib/ines/Makefile
@@ -1,4 +1,4 @@
ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
-obj-m += ines_gpib.o
+obj-$(CONFIG_GPIB_INES) += ines_gpib.o
diff --git a/drivers/staging/gpib/ines/ines.h b/drivers/staging/gpib/ines/ines.h
index 7e8302619998..eed038fd3f28 100644
--- a/drivers/staging/gpib/ines/ines.h
+++ b/drivers/staging/gpib/ines/ines.h
@@ -83,14 +83,14 @@ void ines_set_xfer_counter(struct ines_priv *priv, unsigned int count);
/* inb/outb wrappers */
static inline unsigned int ines_inb(struct ines_priv *priv, unsigned int register_number)
{
- return inb((unsigned long)(priv->nec7210_priv.iobase) +
+ return inb(priv->nec7210_priv.iobase +
register_number * priv->nec7210_priv.offset);
}
static inline void ines_outb(struct ines_priv *priv, unsigned int value,
unsigned int register_number)
{
- outb(value, (unsigned long)(priv->nec7210_priv.iobase) +
+ outb(value, priv->nec7210_priv.iobase +
register_number * priv->nec7210_priv.offset);
}
diff --git a/drivers/staging/gpib/ines/ines_gpib.c b/drivers/staging/gpib/ines/ines_gpib.c
index 9d8387c3bf01..e18455ba842f 100644
--- a/drivers/staging/gpib/ines/ines_gpib.c
+++ b/drivers/staging/gpib/ines/ines_gpib.c
@@ -273,10 +273,10 @@ irqreturn_t ines_pci_interrupt(int irq, void *arg)
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
if (priv->pci_chip_type == PCI_CHIP_QUANCOM) {
- if ((inb((unsigned long)nec_priv->iobase +
+ if ((inb(nec_priv->iobase +
QUANCOM_IRQ_CONTROL_STATUS_REG) &
QUANCOM_IRQ_ASSERTED_BIT))
- outb(QUANCOM_IRQ_ENABLE_BIT, (unsigned long)(nec_priv->iobase) +
+ outb(QUANCOM_IRQ_ENABLE_BIT, nec_priv->iobase +
QUANCOM_IRQ_CONTROL_STATUS_REG);
}
@@ -780,8 +780,8 @@ static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t
if (pci_request_regions(ines_priv->pci_device, "ines-gpib"))
return -1;
- nec_priv->iobase = (void *)(pci_resource_start(ines_priv->pci_device,
- found_id.gpib_region));
+ nec_priv->iobase = pci_resource_start(ines_priv->pci_device,
+ found_id.gpib_region);
ines_priv->pci_chip_type = found_id.pci_chip_type;
nec_priv->offset = found_id.io_offset;
@@ -840,7 +840,7 @@ static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t
}
break;
case PCI_CHIP_QUANCOM:
- outb(QUANCOM_IRQ_ENABLE_BIT, (unsigned long)(nec_priv->iobase) +
+ outb(QUANCOM_IRQ_ENABLE_BIT, nec_priv->iobase +
QUANCOM_IRQ_CONTROL_STATUS_REG);
break;
case PCI_CHIP_QUICKLOGIC5030:
@@ -899,8 +899,8 @@ int ines_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
ines_priv = board->private_data;
nec_priv = &ines_priv->nec7210_priv;
- if (!request_region((unsigned long)config->ibbase, ines_isa_iosize, "ines_gpib")) {
- pr_err("ines_gpib: ioports at 0x%p already in use\n", config->ibbase);
+ if (!request_region(config->ibbase, ines_isa_iosize, "ines_gpib")) {
+ pr_err("ines_gpib: ioports at 0x%x already in use\n", config->ibbase);
return -1;
}
nec_priv->iobase = config->ibbase;
@@ -931,7 +931,7 @@ void ines_pci_detach(gpib_board_t *board)
break;
case PCI_CHIP_QUANCOM:
if (nec_priv->iobase)
- outb(0, (unsigned long)(nec_priv->iobase) +
+ outb(0, nec_priv->iobase +
QUANCOM_IRQ_CONTROL_STATUS_REG);
break;
default:
@@ -960,7 +960,7 @@ void ines_isa_detach(gpib_board_t *board)
free_irq(ines_priv->irq, board);
if (nec_priv->iobase) {
nec7210_board_reset(nec_priv, board);
- release_region((unsigned long)(nec_priv->iobase), ines_isa_iosize);
+ release_region(nec_priv->iobase, ines_isa_iosize);
}
}
ines_free_private(board);
@@ -1355,7 +1355,7 @@ int ines_common_pcmcia_attach(gpib_board_t *board)
return -1;
}
- nec_priv->iobase = (void *)(unsigned long)curr_dev->resource[0]->start;
+ nec_priv->iobase = curr_dev->resource[0]->start;
nec7210_board_reset(nec_priv, board);
@@ -1410,7 +1410,7 @@ void ines_pcmcia_detach(gpib_board_t *board)
free_irq(ines_priv->irq, board);
if (nec_priv->iobase) {
nec7210_board_reset(nec_priv, board);
- release_region((unsigned long)(nec_priv->iobase), ines_pcmcia_iosize);
+ release_region(nec_priv->iobase, ines_pcmcia_iosize);
}
}
ines_free_private(board);
diff --git a/drivers/staging/gpib/lpvo_usb_gpib/Makefile b/drivers/staging/gpib/lpvo_usb_gpib/Makefile
index 137511acce63..360553488e6d 100644
--- a/drivers/staging/gpib/lpvo_usb_gpib/Makefile
+++ b/drivers/staging/gpib/lpvo_usb_gpib/Makefile
@@ -1,3 +1,3 @@
-obj-m += lpvo_usb_gpib.o
+obj-$(CONFIG_GPIB_LPVO) += lpvo_usb_gpib.o
diff --git a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
index 267651a15fa0..93a05c792816 100644
--- a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
+++ b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
@@ -99,8 +99,8 @@ module_param(debug, int, 0644);
#define USB_GPIB_DEBUG_ON "\nIBDE\xAA\n"
#define USB_GPIB_SET_LISTEN "\nIBDT0\n"
#define USB_GPIB_SET_TALK "\nIBDT1\n"
-#define USB_GPIB_SET_LINES "\nIBDC\n"
-#define USB_GPIB_SET_DATA "\nIBDM\n"
+#define USB_GPIB_SET_LINES "\nIBDC.\n"
+#define USB_GPIB_SET_DATA "\nIBDM.\n"
#define USB_GPIB_READ_LINES "\nIBD?C\n"
#define USB_GPIB_READ_DATA "\nIBD?M\n"
#define USB_GPIB_READ_BUS "\nIBD??\n"
@@ -210,7 +210,7 @@ static int skel_do_release(gpib_board_t *);
* (unix time in sec and NANOsec)
*/
-inline int usec_diff(struct timespec64 *a, struct timespec64 *b)
+static inline int usec_diff(struct timespec64 *a, struct timespec64 *b)
{
return ((a->tv_sec - b->tv_sec) * 1000000 +
(a->tv_nsec - b->tv_nsec) / 1000);
@@ -436,7 +436,7 @@ static void set_timeout(gpib_board_t *board)
static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *config)
{
int retval, j;
- int base = (long)config->ibbase;
+ u32 base = config->ibbase;
char *device_path;
int match;
struct usb_device *udev;
@@ -589,7 +589,7 @@ static int usb_gpib_command(gpib_board_t *board,
size_t *bytes_written)
{
int i, retval;
- char command[6] = "IBc\n";
+ char command[6] = "IBc.\n";
DIA_LOG(1, "enter %p\n", board);
@@ -608,7 +608,7 @@ static int usb_gpib_command(gpib_board_t *board,
}
/**
- * disable_eos() - Disable END on eos byte (END on EOI only)
+ * usb_gpib_disable_eos() - Disable END on eos byte (END on EOI only)
*
* @board: the gpib_board data area for this gpib interface
*
@@ -624,7 +624,7 @@ static void usb_gpib_disable_eos(gpib_board_t *board)
}
/**
- * enable_eos() - Enable END for reads when eos byte is received.
+ * usb_gpib_enable_eos() - Enable END for reads when eos byte is received.
*
* @board: the gpib_board data area for this gpib interface
* @eos_byte: the 'eos' byte
@@ -647,7 +647,7 @@ static int usb_gpib_enable_eos(gpib_board_t *board,
}
/**
- * go_to_standby() - De-assert ATN
+ * usb_gpib_go_to_standby() - De-assert ATN
*
* @board: the gpib_board data area for this gpib interface
*/
@@ -664,7 +664,7 @@ static int usb_gpib_go_to_standby(gpib_board_t *board)
}
/**
- * interface_clear() - Assert or de-assert IFC
+ * usb_gpib_interface_clear() - Assert or de-assert IFC
*
* @board: the gpib_board data area for this gpib interface
* assert: 1: assert IFC; 0: de-assert IFC
diff --git a/drivers/staging/gpib/nec7210/nec7210.c b/drivers/staging/gpib/nec7210/nec7210.c
index 1d9951035497..c9a837fad96e 100644
--- a/drivers/staging/gpib/nec7210/nec7210.c
+++ b/drivers/staging/gpib/nec7210/nec7210.c
@@ -1035,7 +1035,7 @@ EXPORT_SYMBOL(nec7210_board_online);
/* wrappers for io */
uint8_t nec7210_ioport_read_byte(struct nec7210_priv *priv, unsigned int register_num)
{
- return inb((unsigned long)(priv->iobase) + register_num * priv->offset);
+ return inb(priv->iobase + register_num * priv->offset);
}
EXPORT_SYMBOL(nec7210_ioport_read_byte);
@@ -1047,7 +1047,7 @@ void nec7210_ioport_write_byte(struct nec7210_priv *priv, uint8_t data, unsigned
*/
nec7210_locking_ioport_write_byte(priv, data, register_num);
else
- outb(data, (unsigned long)(priv->iobase) + register_num * priv->offset);
+ outb(data, priv->iobase + register_num * priv->offset);
}
EXPORT_SYMBOL(nec7210_ioport_write_byte);
@@ -1058,7 +1058,7 @@ uint8_t nec7210_locking_ioport_read_byte(struct nec7210_priv *priv, unsigned int
unsigned long flags;
spin_lock_irqsave(&priv->register_page_lock, flags);
- retval = inb((unsigned long)(priv->iobase) + register_num * priv->offset);
+ retval = inb(priv->iobase + register_num * priv->offset);
spin_unlock_irqrestore(&priv->register_page_lock, flags);
return retval;
}
@@ -1072,7 +1072,7 @@ void nec7210_locking_ioport_write_byte(struct nec7210_priv *priv, uint8_t data,
spin_lock_irqsave(&priv->register_page_lock, flags);
if (register_num == AUXMR)
udelay(1);
- outb(data, (unsigned long)(priv->iobase) + register_num * priv->offset);
+ outb(data, priv->iobase + register_num * priv->offset);
spin_unlock_irqrestore(&priv->register_page_lock, flags);
}
EXPORT_SYMBOL(nec7210_locking_ioport_write_byte);
@@ -1080,7 +1080,7 @@ EXPORT_SYMBOL(nec7210_locking_ioport_write_byte);
uint8_t nec7210_iomem_read_byte(struct nec7210_priv *priv, unsigned int register_num)
{
- return readb(priv->iobase + register_num * priv->offset);
+ return readb(priv->mmiobase + register_num * priv->offset);
}
EXPORT_SYMBOL(nec7210_iomem_read_byte);
@@ -1092,7 +1092,7 @@ void nec7210_iomem_write_byte(struct nec7210_priv *priv, uint8_t data, unsigned
*/
nec7210_locking_iomem_write_byte(priv, data, register_num);
else
- writeb(data, priv->iobase + register_num * priv->offset);
+ writeb(data, priv->mmiobase + register_num * priv->offset);
}
EXPORT_SYMBOL(nec7210_iomem_write_byte);
@@ -1102,7 +1102,7 @@ uint8_t nec7210_locking_iomem_read_byte(struct nec7210_priv *priv, unsigned int
unsigned long flags;
spin_lock_irqsave(&priv->register_page_lock, flags);
- retval = readb(priv->iobase + register_num * priv->offset);
+ retval = readb(priv->mmiobase + register_num * priv->offset);
spin_unlock_irqrestore(&priv->register_page_lock, flags);
return retval;
}
@@ -1116,7 +1116,7 @@ void nec7210_locking_iomem_write_byte(struct nec7210_priv *priv, uint8_t data,
spin_lock_irqsave(&priv->register_page_lock, flags);
if (register_num == AUXMR)
udelay(1);
- writeb(data, priv->iobase + register_num * priv->offset);
+ writeb(data, priv->mmiobase + register_num * priv->offset);
spin_unlock_irqrestore(&priv->register_page_lock, flags);
}
EXPORT_SYMBOL(nec7210_locking_iomem_write_byte);
diff --git a/drivers/staging/gpib/ni_usb/Makefile b/drivers/staging/gpib/ni_usb/Makefile
index e22b3b21a62c..469c5d16add3 100644
--- a/drivers/staging/gpib/ni_usb/Makefile
+++ b/drivers/staging/gpib/ni_usb/Makefile
@@ -1,4 +1,4 @@
-obj-m += ni_usb_gpib.o
+obj-$(CONFIG_GPIB_NI_USB) += ni_usb_gpib.o
diff --git a/drivers/staging/gpib/pc2/Makefile b/drivers/staging/gpib/pc2/Makefile
index 8148425e0f87..481ee4296e1b 100644
--- a/drivers/staging/gpib/pc2/Makefile
+++ b/drivers/staging/gpib/pc2/Makefile
@@ -1,5 +1,5 @@
-obj-m += pc2_gpib.o
+obj-$(CONFIG_GPIB_PC2) += pc2_gpib.o
diff --git a/drivers/staging/gpib/pc2/pc2_gpib.c b/drivers/staging/gpib/pc2/pc2_gpib.c
index 7b3b34f47341..d22af25125b1 100644
--- a/drivers/staging/gpib/pc2/pc2_gpib.c
+++ b/drivers/staging/gpib/pc2/pc2_gpib.c
@@ -426,7 +426,7 @@ int pc2_attach(gpib_board_t *board, const gpib_board_config_t *config)
nec_priv = &pc2_priv->nec7210_priv;
nec_priv->offset = pc2_reg_offset;
- if (request_region((unsigned long)config->ibbase, pc2_iosize, "pc2") == 0) {
+ if (request_region(config->ibbase, pc2_iosize, "pc2") == 0) {
pr_err("gpib: ioports are already in use\n");
return -1;
}
@@ -471,7 +471,7 @@ void pc2_detach(gpib_board_t *board)
free_irq(pc2_priv->irq, board);
if (nec_priv->iobase) {
nec7210_board_reset(nec_priv, board);
- release_region((unsigned long)(nec_priv->iobase), pc2_iosize);
+ release_region(nec_priv->iobase, pc2_iosize);
}
if (nec_priv->dma_buffer) {
dma_free_coherent(board->dev, nec_priv->dma_buffer_length,
@@ -498,14 +498,14 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
nec_priv = &pc2_priv->nec7210_priv;
nec_priv->offset = pc2a_reg_offset;
- switch ((unsigned long)(config->ibbase)) {
+ switch (config->ibbase) {
case 0x02e1:
case 0x22e1:
case 0x42e1:
case 0x62e1:
break;
default:
- pr_err("PCIIa base range invalid, must be one of 0x[0246]2e1, but is 0x%p\n",
+ pr_err("PCIIa base range invalid, must be one of 0x[0246]2e1, but is 0x%d\n",
config->ibbase);
return -1;
}
@@ -522,7 +522,7 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
unsigned int err = 0;
for (i = 0; i < num_registers; i++) {
- if (check_region((unsigned long)config->ibbase + i * pc2a_reg_offset, 1))
+ if (check_region(config->ibbase + i * pc2a_reg_offset, 1))
err++;
}
if (config->ibirq && check_region(pc2a_clear_intr_iobase + config->ibirq, 1))
@@ -533,11 +533,11 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
}
#endif
for (i = 0; i < num_registers; i++) {
- if (!request_region((unsigned long)config->ibbase +
+ if (!request_region(config->ibbase +
i * pc2a_reg_offset, 1, "pc2a")) {
pr_err("gpib: ioports are already in use");
for (j = 0; j < i; j++)
- release_region((unsigned long)(config->ibbase) +
+ release_region(config->ibbase +
j * pc2a_reg_offset, 1);
return -1;
}
@@ -608,7 +608,7 @@ static void pc2a_common_detach(gpib_board_t *board, unsigned int num_registers)
if (nec_priv->iobase) {
nec7210_board_reset(nec_priv, board);
for (i = 0; i < num_registers; i++)
- release_region((unsigned long)nec_priv->iobase +
+ release_region(nec_priv->iobase +
i * pc2a_reg_offset, 1);
}
if (pc2_priv->clear_intr_addr)
diff --git a/drivers/staging/gpib/tms9914/Makefile b/drivers/staging/gpib/tms9914/Makefile
index 81b7e3cf104c..4705ab07f413 100644
--- a/drivers/staging/gpib/tms9914/Makefile
+++ b/drivers/staging/gpib/tms9914/Makefile
@@ -1,5 +1,5 @@
-obj-m += tms9914.o
+obj-$(CONFIG_GPIB_TMS9914) += tms9914.o
diff --git a/drivers/staging/gpib/tms9914/tms9914.c b/drivers/staging/gpib/tms9914/tms9914.c
index 152b243b845b..ec8e1d4d762f 100644
--- a/drivers/staging/gpib/tms9914/tms9914.c
+++ b/drivers/staging/gpib/tms9914/tms9914.c
@@ -866,14 +866,14 @@ EXPORT_SYMBOL_GPL(tms9914_online);
// wrapper for inb
uint8_t tms9914_ioport_read_byte(struct tms9914_priv *priv, unsigned int register_num)
{
- return inb((unsigned long)(priv->iobase) + register_num * priv->offset);
+ return inb(priv->iobase + register_num * priv->offset);
}
EXPORT_SYMBOL_GPL(tms9914_ioport_read_byte);
// wrapper for outb
void tms9914_ioport_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num)
{
- outb(data, (unsigned long)(priv->iobase) + register_num * priv->offset);
+ outb(data, priv->iobase + register_num * priv->offset);
if (register_num == AUXCR)
udelay(1);
}
@@ -883,14 +883,14 @@ EXPORT_SYMBOL_GPL(tms9914_ioport_write_byte);
// wrapper for readb
uint8_t tms9914_iomem_read_byte(struct tms9914_priv *priv, unsigned int register_num)
{
- return readb(priv->iobase + register_num * priv->offset);
+ return readb(priv->mmiobase + register_num * priv->offset);
}
EXPORT_SYMBOL_GPL(tms9914_iomem_read_byte);
// wrapper for writeb
void tms9914_iomem_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num)
{
- writeb(data, priv->iobase + register_num * priv->offset);
+ writeb(data, priv->mmiobase + register_num * priv->offset);
if (register_num == AUXCR)
udelay(1);
}
diff --git a/drivers/staging/gpib/tnt4882/Makefile b/drivers/staging/gpib/tnt4882/Makefile
index f767c990db7a..a3c3fb96d5ed 100644
--- a/drivers/staging/gpib/tnt4882/Makefile
+++ b/drivers/staging/gpib/tnt4882/Makefile
@@ -1,5 +1,5 @@
ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
-obj-m += tnt4882.o
+obj-$(CONFIG_GPIB_NI_PCI_ISA) += tnt4882.o
tnt4882-objs := tnt4882_gpib.o mite.o
diff --git a/drivers/staging/gpib/tnt4882/mite.c b/drivers/staging/gpib/tnt4882/mite.c
index 0edf34d243e9..4bd352967616 100644
--- a/drivers/staging/gpib/tnt4882/mite.c
+++ b/drivers/staging/gpib/tnt4882/mite.c
@@ -148,72 +148,3 @@ void mite_list_devices(void)
}
pr_info("\n");
}
-
-int mite_bytes_transferred(struct mite_struct *mite, int chan)
-{
- int dar, fcr;
-
- dar = readl(mite->mite_io_addr + MITE_DAR + CHAN_OFFSET(chan));
- fcr = readl(mite->mite_io_addr + MITE_FCR + CHAN_OFFSET(chan)) & 0x000000FF;
- return dar - fcr;
-}
-
-int mite_dma_tcr(struct mite_struct *mite)
-{
- int tcr;
- int lkar;
-
- lkar = readl(mite->mite_io_addr + CHAN_OFFSET(0) + MITE_LKAR);
- tcr = readl(mite->mite_io_addr + CHAN_OFFSET(0) + MITE_TCR);
- MDPRINTK("lkar=0x%08x tcr=%d\n", lkar, tcr);
-
- return tcr;
-}
-
-void mite_dma_disarm(struct mite_struct *mite)
-{
- int chor;
-
- /* disarm */
- chor = CHOR_ABORT;
- writel(chor, mite->mite_io_addr + CHAN_OFFSET(0) + MITE_CHOR);
-}
-
-void mite_dump_regs(struct mite_struct *mite)
-{
- void *addr = 0;
- unsigned long temp = 0;
-
- pr_info("mite address is =0x%p\n", mite->mite_io_addr);
-
- addr = mite->mite_io_addr + MITE_CHOR + CHAN_OFFSET(0);
- pr_info("mite status[CHOR]at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CHOR_strings,temp);
- addr = mite->mite_io_addr + MITE_CHCR + CHAN_OFFSET(0);
- pr_info("mite status[CHCR]at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CHCR_strings,temp);
- addr = mite->mite_io_addr + MITE_TCR + CHAN_OFFSET(0);
- pr_info("mite status[TCR] at 0x%p =0x%08x\n", addr, readl(addr));
- addr = mite->mite_io_addr + MITE_MCR + CHAN_OFFSET(0);
- pr_info("mite status[MCR] at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_MCR_strings,temp);
- addr = mite->mite_io_addr + MITE_MAR + CHAN_OFFSET(0);
- pr_info("mite status[MAR] at 0x%p =0x%08x\n", addr, readl(addr));
- addr = mite->mite_io_addr + MITE_DCR + CHAN_OFFSET(0);
- pr_info("mite status[DCR] at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CR_strings,temp);
- addr = mite->mite_io_addr + MITE_DAR + CHAN_OFFSET(0);
- pr_info("mite status[DAR] at 0x%p =0x%08x\n", addr, readl(addr));
- addr = mite->mite_io_addr + MITE_LKCR + CHAN_OFFSET(0);
- pr_info("mite status[LKCR]at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CR_strings,temp);
- addr = mite->mite_io_addr + MITE_LKAR + CHAN_OFFSET(0);
- pr_info("mite status[LKAR]at 0x%p =0x%08x\n", addr, readl(addr));
-
- addr = mite->mite_io_addr + MITE_CHSR + CHAN_OFFSET(0);
- pr_info("mite status[CHSR]at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CHSR_strings,temp);
- addr = mite->mite_io_addr + MITE_FCR + CHAN_OFFSET(0);
- pr_info("mite status[FCR] at 0x%p =0x%08x\n\n", addr, readl(addr));
-}
-
diff --git a/drivers/staging/gpib/tnt4882/mite.h b/drivers/staging/gpib/tnt4882/mite.h
index 6454d069b8cc..edb873435b51 100644
--- a/drivers/staging/gpib/tnt4882/mite.h
+++ b/drivers/staging/gpib/tnt4882/mite.h
@@ -34,9 +34,9 @@ struct mite_struct {
struct pci_dev *pcidev;
unsigned long mite_phys_addr;
- void *mite_io_addr;
+ void __iomem *mite_io_addr;
unsigned long daq_phys_addr;
- void *daq_io_addr;
+ void __iomem *daq_io_addr;
int DMA_CheckNearEnd;
@@ -61,15 +61,6 @@ int mite_setup(struct mite_struct *mite);
void mite_unsetup(struct mite_struct *mite);
void mite_list_devices(void);
-int mite_dma_tcr(struct mite_struct *mite);
-
-void mite_dma_arm(struct mite_struct *mite);
-void mite_dma_disarm(struct mite_struct *mite);
-
-void mite_dump_regs(struct mite_struct *mite);
-void mite_setregs(struct mite_struct *mite, unsigned long ll_start, int chan, int dir);
-int mite_bytes_transferred(struct mite_struct *mite, int chan);
-
#define CHAN_OFFSET(x) (0x100 * (x))
/* DMA base for chan 0 is 0x500, chan 1 is 0x600 */
diff --git a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c b/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
index e49a952fa0d8..ce91c3eb768c 100644
--- a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
+++ b/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
@@ -45,10 +45,6 @@ struct tnt4882_priv {
unsigned short imr0_bits;
unsigned short imr3_bits;
unsigned short auxg_bits; // bits written to auxiliary register G
- void (*io_writeb)(unsigned int value, void *address);
- void (*io_writew)(unsigned int value, void *address);
- unsigned int (*io_readb)(void *address);
- unsigned int (*io_readw)(void *address);
};
// interface functions
@@ -104,23 +100,23 @@ static const int atgpib_iosize = 32;
/* paged io */
static inline unsigned int tnt_paged_readb(struct tnt4882_priv *priv, unsigned long offset)
{
- priv->io_writeb(AUX_PAGEIN, priv->nec7210_priv.iobase + AUXMR * priv->nec7210_priv.offset);
+ iowrite8(AUX_PAGEIN, priv->nec7210_priv.mmiobase + AUXMR * priv->nec7210_priv.offset);
udelay(1);
- return priv->io_readb(priv->nec7210_priv.iobase + offset);
+ return ioread8(priv->nec7210_priv.mmiobase + offset);
}
static inline void tnt_paged_writeb(struct tnt4882_priv *priv, unsigned int value,
unsigned long offset)
{
- priv->io_writeb(AUX_PAGEIN, priv->nec7210_priv.iobase + AUXMR * priv->nec7210_priv.offset);
+ iowrite8(AUX_PAGEIN, priv->nec7210_priv.mmiobase + AUXMR * priv->nec7210_priv.offset);
udelay(1);
- priv->io_writeb(value, priv->nec7210_priv.iobase + offset);
+ iowrite8(value, priv->nec7210_priv.mmiobase + offset);
}
/* readb/writeb wrappers */
static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long offset)
{
- void *address = priv->nec7210_priv.iobase + offset;
+ void *address = priv->nec7210_priv.mmiobase + offset;
unsigned long flags;
unsigned short retval;
spinlock_t *register_lock = &priv->nec7210_priv.register_page_lock;
@@ -134,7 +130,7 @@ static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long
switch (priv->nec7210_priv.type) {
case TNT4882:
case TNT5004:
- retval = priv->io_readb(address);
+ retval = ioread8(address);
break;
case NAT4882:
retval = tnt_paged_readb(priv, offset - tnt_pagein_offset);
@@ -149,7 +145,7 @@ static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long
}
break;
default:
- retval = priv->io_readb(address);
+ retval = ioread8(address);
break;
}
spin_unlock_irqrestore(register_lock, flags);
@@ -158,7 +154,7 @@ static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long
static inline void tnt_writeb(struct tnt4882_priv *priv, unsigned short value, unsigned long offset)
{
- void *address = priv->nec7210_priv.iobase + offset;
+ void *address = priv->nec7210_priv.mmiobase + offset;
unsigned long flags;
spinlock_t *register_lock = &priv->nec7210_priv.register_page_lock;
@@ -170,7 +166,7 @@ static inline void tnt_writeb(struct tnt4882_priv *priv, unsigned short value, u
switch (priv->nec7210_priv.type) {
case TNT4882:
case TNT5004:
- priv->io_writeb(value, address);
+ iowrite8(value, address);
break;
case NAT4882:
tnt_paged_writeb(priv, value, offset - tnt_pagein_offset);
@@ -183,7 +179,7 @@ static inline void tnt_writeb(struct tnt4882_priv *priv, unsigned short value, u
}
break;
default:
- priv->io_writeb(value, address);
+ iowrite8(value, address);
break;
}
spin_unlock_irqrestore(register_lock, flags);
@@ -288,7 +284,7 @@ static int drain_fifo_words(struct tnt4882_priv *tnt_priv, uint8_t *buffer, int
while (fifo_word_available(tnt_priv) && count + 2 <= num_bytes) {
short word;
- word = tnt_priv->io_readw(nec_priv->iobase + FIFOB);
+ word = ioread16(nec_priv->mmiobase + FIFOB);
buffer[count++] = word & 0xff;
buffer[count++] = (word >> 8) & 0xff;
}
@@ -573,7 +569,7 @@ static int generic_write(gpib_board_t *board, uint8_t *buffer, size_t length,
word = buffer[count++] & 0xff;
if (count < length)
word |= (buffer[count++] << 8) & 0xff00;
- tnt_priv->io_writew(word, nec_priv->iobase + FIFOB);
+ iowrite16(word, nec_priv->mmiobase + FIFOB);
}
// avoid unnecessary HR_NFF interrupts
// tnt_priv->imr3_bits |= HR_NFF;
@@ -1269,10 +1265,6 @@ int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
if (tnt4882_allocate_private(board))
return -ENOMEM;
tnt_priv = board->private_data;
- tnt_priv->io_writeb = writeb_wrapper;
- tnt_priv->io_readb = readb_wrapper;
- tnt_priv->io_writew = writew_wrapper;
- tnt_priv->io_readw = readw_wrapper;
nec_priv = &tnt_priv->nec7210_priv;
nec_priv->type = TNT4882;
nec_priv->read_byte = nec7210_locking_iomem_read_byte;
@@ -1324,7 +1316,7 @@ int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
return retval;
}
- nec_priv->iobase = tnt_priv->mite->daq_io_addr;
+ nec_priv->mmiobase = tnt_priv->mite->daq_io_addr;
// get irq
if (request_irq(mite_irq(tnt_priv->mite), tnt4882_interrupt, isr_flags,
@@ -1359,7 +1351,7 @@ void ni_pci_detach(gpib_board_t *board)
if (tnt_priv) {
nec_priv = &tnt_priv->nec7210_priv;
- if (nec_priv->iobase)
+ if (nec_priv->mmiobase)
tnt4882_board_reset(tnt_priv, board);
if (tnt_priv->irq)
free_irq(tnt_priv->irq, board);
@@ -1400,7 +1392,7 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
struct tnt4882_priv *tnt_priv;
struct nec7210_priv *nec_priv;
int isr_flags = 0;
- void *iobase;
+ u32 iobase;
int irq;
board->status = 0;
@@ -1408,10 +1400,6 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
if (tnt4882_allocate_private(board))
return -ENOMEM;
tnt_priv = board->private_data;
- tnt_priv->io_writeb = outb_wrapper;
- tnt_priv->io_readb = inb_wrapper;
- tnt_priv->io_writew = outw_wrapper;
- tnt_priv->io_readw = inw_wrapper;
nec_priv = &tnt_priv->nec7210_priv;
nec_priv->type = chipset;
nec_priv->read_byte = nec7210_locking_ioport_read_byte;
@@ -1427,18 +1415,20 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
if (retval < 0)
return retval;
tnt_priv->pnp_dev = dev;
- iobase = (void *)(pnp_port_start(dev, 0));
+ iobase = pnp_port_start(dev, 0);
irq = pnp_irq(dev, 0);
} else {
iobase = config->ibbase;
irq = config->ibirq;
}
// allocate ioports
- if (!request_region((unsigned long)(iobase), atgpib_iosize, "atgpib")) {
+ if (!request_region(iobase, atgpib_iosize, "atgpib")) {
pr_err("tnt4882: failed to allocate ioports\n");
return -1;
}
- nec_priv->iobase = iobase;
+ nec_priv->mmiobase = ioport_map(iobase, atgpib_iosize);
+ if (!nec_priv->mmiobase)
+ return -1;
// get irq
if (request_irq(irq, tnt4882_interrupt, isr_flags, "atgpib", board)) {
@@ -1478,8 +1468,10 @@ void ni_isa_detach(gpib_board_t *board)
tnt4882_board_reset(tnt_priv, board);
if (tnt_priv->irq)
free_irq(tnt_priv->irq, board);
+ if (nec_priv->mmiobase)
+ ioport_unmap(nec_priv->mmiobase);
if (nec_priv->iobase)
- release_region((unsigned long)(nec_priv->iobase), atgpib_iosize);
+ release_region(nec_priv->iobase, atgpib_iosize);
if (tnt_priv->pnp_dev)
pnp_device_detach(tnt_priv->pnp_dev);
}
@@ -1817,10 +1809,6 @@ int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
if (tnt4882_allocate_private(board))
return -ENOMEM;
tnt_priv = board->private_data;
- tnt_priv->io_writeb = outb_wrapper;
- tnt_priv->io_readb = inb_wrapper;
- tnt_priv->io_writew = outw_wrapper;
- tnt_priv->io_readw = inw_wrapper;
nec_priv = &tnt_priv->nec7210_priv;
nec_priv->type = TNT4882;
nec_priv->read_byte = nec7210_locking_ioport_read_byte;
@@ -1835,7 +1823,10 @@ int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
return -EIO;
}
- nec_priv->iobase = (void *)(unsigned long)curr_dev->resource[0]->start;
+ nec_priv->mmiobase = ioport_map(curr_dev->resource[0]->start,
+ resource_size(curr_dev->resource[0]));
+ if (!nec_priv->mmiobase)
+ return -1;
// get irq
if (request_irq(curr_dev->irq, tnt4882_interrupt, isr_flags, "tnt4882", board)) {
@@ -1860,9 +1851,11 @@ void ni_pcmcia_detach(gpib_board_t *board)
nec_priv = &tnt_priv->nec7210_priv;
if (tnt_priv->irq)
free_irq(tnt_priv->irq, board);
+ if (nec_priv->mmiobase)
+ ioport_unmap(nec_priv->mmiobase);
if (nec_priv->iobase) {
tnt4882_board_reset(tnt_priv, board);
- release_region((unsigned long)nec_priv->iobase, pcmcia_gpib_iosize);
+ release_region(nec_priv->iobase, pcmcia_gpib_iosize);
}
}
tnt4882_free_private(board);
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 492612e8f8ba..140ee4f9c137 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -158,7 +158,7 @@ static int ad9832_write_frequency(struct ad9832_state *st,
static int ad9832_write_phase(struct ad9832_state *st,
unsigned long addr, unsigned long phase)
{
- if (phase > BIT(AD9832_PHASE_BITS))
+ if (phase >= BIT(AD9832_PHASE_BITS))
return -EINVAL;
st->phase_data[0] = cpu_to_be16((AD9832_CMD_PHA8BITSW << CMD_SHIFT) |
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 47e7d7e6d920..6e99e008c5f4 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -131,7 +131,7 @@ static int ad9834_write_frequency(struct ad9834_state *st,
static int ad9834_write_phase(struct ad9834_state *st,
unsigned long addr, unsigned long phase)
{
- if (phase > BIT(AD9834_PHASE_BITS))
+ if (phase >= BIT(AD9834_PHASE_BITS))
return -EINVAL;
st->data = cpu_to_be16(addr | phase);
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index fab11b98ca49..5ab4ce4daaeb 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -160,6 +160,7 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int
return ERR_PTR(ret);
}
+ of_node_put(sensor_specs.np);
if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ?
sensor_specs.args[0] : 0)) {
pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, child);
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 5f9f06911795..68baf75bdadc 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -812,6 +812,9 @@ int serial8250_register_8250_port(const struct uart_8250_port *up)
uart->dl_write = up->dl_write;
if (uart->port.type != PORT_8250_CIR) {
+ if (uart_console_registered(&uart->port))
+ pm_runtime_get_sync(uart->port.dev);
+
if (serial8250_isa_config != NULL)
serial8250_isa_config(0, &uart->port,
&uart->capabilities);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 17f70e4bee43..9c59ec128bb4 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2692,7 +2692,7 @@ static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
{
u32 ucr3;
- uart_port_lock(&sport->port);
+ uart_port_lock_irq(&sport->port);
ucr3 = imx_uart_readl(sport, UCR3);
if (on) {
@@ -2714,7 +2714,7 @@ static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
imx_uart_writel(sport, ucr1, UCR1);
}
- uart_port_unlock(&sport->port);
+ uart_port_unlock_irq(&sport->port);
}
static int imx_uart_suspend_noirq(struct device *dev)
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 7dc254546075..1ec5d8c3aef8 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -1051,14 +1051,14 @@ static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (break_state)
stm32_usart_set_bits(port, ofs->rqr, USART_RQR_SBKRQ);
else
stm32_usart_clr_bits(port, ofs->rqr, USART_RQR_SBKRQ);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int stm32_usart_startup(struct uart_port *port)
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 9ffd94ddf8c7..786f20ef2238 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -237,12 +237,6 @@ static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
hba->vops->config_scaling_param(hba, p, data);
}
-static inline void ufshcd_vops_reinit_notify(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->reinit_notify)
- hba->vops->reinit_notify(hba);
-}
-
static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
{
if (hba->vops && hba->vops->mcq_config_resource)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 8a01e4393159..9c26e8767515 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -8858,7 +8858,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
ufshcd_device_reset(hba);
ufs_put_device_desc(hba);
ufshcd_hba_stop(hba);
- ufshcd_vops_reinit_notify(hba);
ret = ufshcd_hba_enable(hba);
if (ret) {
dev_err(hba->dev, "Host controller enable failed\n");
@@ -10591,14 +10590,17 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
}
/*
- * Set the default power management level for runtime and system PM.
+ * Set the default power management level for runtime and system PM if
+ * not set by the host controller drivers.
* Default power saving mode is to keep UFS link in Hibern8 state
* and UFS device in sleep state.
*/
- hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ if (!hba->rpm_lvl)
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
- hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ if (!hba->spm_lvl)
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 68040b2ab5f8..91e94fe990b4 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -368,6 +368,11 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
if (ret)
return ret;
+ if (phy->power_count) {
+ phy_power_off(phy);
+ phy_exit(phy);
+ }
+
/* phy initialization - calibrate the phy */
ret = phy_init(phy);
if (ret) {
@@ -866,6 +871,7 @@ static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
*/
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
+ const struct ufs_qcom_drvdata *drvdata = of_device_get_match_data(hba->dev);
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
if (host->hw_ver.major == 0x2)
@@ -874,9 +880,8 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
if (host->hw_ver.major > 0x3)
hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
- if (of_device_is_compatible(hba->dev->of_node, "qcom,sm8550-ufshc") ||
- of_device_is_compatible(hba->dev->of_node, "qcom,sm8650-ufshc"))
- hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP;
+ if (drvdata && drvdata->quirks)
+ hba->quirks |= drvdata->quirks;
}
static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host)
@@ -1064,6 +1069,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
struct device *dev = hba->dev;
struct ufs_qcom_host *host;
struct ufs_clk_info *clki;
+ const struct ufs_qcom_drvdata *drvdata = of_device_get_match_data(hba->dev);
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host)
@@ -1143,6 +1149,9 @@ static int ufs_qcom_init(struct ufs_hba *hba)
dev_warn(dev, "%s: failed to configure the testbus %d\n",
__func__, err);
+ if (drvdata && drvdata->no_phy_retention)
+ hba->spm_lvl = UFS_PM_LVL_5;
+
return 0;
out_variant_clear:
@@ -1579,13 +1588,6 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
}
#endif
-static void ufs_qcom_reinit_notify(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- phy_power_off(host->generic_phy);
-}
-
/* Resources */
static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
{.name = "ufs_mem",},
@@ -1825,7 +1827,6 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.device_reset = ufs_qcom_device_reset,
.config_scaling_param = ufs_qcom_config_scaling_param,
.program_key = ufs_qcom_ice_program_key,
- .reinit_notify = ufs_qcom_reinit_notify,
.mcq_config_resource = ufs_qcom_mcq_config_resource,
.get_hba_mac = ufs_qcom_get_hba_mac,
.op_runtime_config = ufs_qcom_op_runtime_config,
@@ -1868,9 +1869,15 @@ static void ufs_qcom_remove(struct platform_device *pdev)
platform_device_msi_free_irqs_all(hba->dev);
}
+static const struct ufs_qcom_drvdata ufs_qcom_sm8550_drvdata = {
+ .quirks = UFSHCD_QUIRK_BROKEN_LSDBS_CAP,
+ .no_phy_retention = true,
+};
+
static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
{ .compatible = "qcom,ufshc" },
- { .compatible = "qcom,sm8550-ufshc" },
+ { .compatible = "qcom,sm8550-ufshc", .data = &ufs_qcom_sm8550_drvdata },
+ { .compatible = "qcom,sm8650-ufshc", .data = &ufs_qcom_sm8550_drvdata },
{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index b9de170983c9..919f53682beb 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -217,6 +217,11 @@ struct ufs_qcom_host {
bool esi_enabled;
};
+struct ufs_qcom_drvdata {
+ enum ufshcd_quirks quirks;
+ bool no_phy_retention;
+};
+
static inline u32
ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
{
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index f2801700be8e..1a7fc638213e 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -370,25 +370,29 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
data->pinctrl = devm_pinctrl_get(dev);
if (PTR_ERR(data->pinctrl) == -ENODEV)
data->pinctrl = NULL;
- else if (IS_ERR(data->pinctrl))
- return dev_err_probe(dev, PTR_ERR(data->pinctrl),
+ else if (IS_ERR(data->pinctrl)) {
+ ret = dev_err_probe(dev, PTR_ERR(data->pinctrl),
"pinctrl get failed\n");
+ goto err_put;
+ }
data->hsic_pad_regulator =
devm_regulator_get_optional(dev, "hsic");
if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) {
/* no pad regulator is needed */
data->hsic_pad_regulator = NULL;
- } else if (IS_ERR(data->hsic_pad_regulator))
- return dev_err_probe(dev, PTR_ERR(data->hsic_pad_regulator),
+ } else if (IS_ERR(data->hsic_pad_regulator)) {
+ ret = dev_err_probe(dev, PTR_ERR(data->hsic_pad_regulator),
"Get HSIC pad regulator error\n");
+ goto err_put;
+ }
if (data->hsic_pad_regulator) {
ret = regulator_enable(data->hsic_pad_regulator);
if (ret) {
dev_err(dev,
"Failed to enable HSIC pad regulator\n");
- return ret;
+ goto err_put;
}
}
}
@@ -402,13 +406,14 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
dev_err(dev,
"pinctrl_hsic_idle lookup failed, err=%ld\n",
PTR_ERR(pinctrl_hsic_idle));
- return PTR_ERR(pinctrl_hsic_idle);
+ ret = PTR_ERR(pinctrl_hsic_idle);
+ goto err_put;
}
ret = pinctrl_select_state(data->pinctrl, pinctrl_hsic_idle);
if (ret) {
dev_err(dev, "hsic_idle select failed, err=%d\n", ret);
- return ret;
+ goto err_put;
}
data->pinctrl_hsic_active = pinctrl_lookup_state(data->pinctrl,
@@ -417,7 +422,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
dev_err(dev,
"pinctrl_hsic_active lookup failed, err=%ld\n",
PTR_ERR(data->pinctrl_hsic_active));
- return PTR_ERR(data->pinctrl_hsic_active);
+ ret = PTR_ERR(data->pinctrl_hsic_active);
+ goto err_put;
}
}
@@ -527,6 +533,8 @@ disable_hsic_regulator:
if (pdata.flags & CI_HDRC_PMQOS)
cpu_latency_qos_remove_request(&data->pm_qos_req);
data->ci_pdev = NULL;
+err_put:
+ put_device(data->usbmisc_data->dev);
return ret;
}
@@ -551,6 +559,7 @@ static void ci_hdrc_imx_remove(struct platform_device *pdev)
if (data->hsic_pad_regulator)
regulator_disable(data->hsic_pad_regulator);
}
+ put_device(data->usbmisc_data->dev);
}
static void ci_hdrc_imx_shutdown(struct platform_device *pdev)
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 5a2e43331064..ff1a941fd2ed 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1337,11 +1337,12 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
return -EINVAL;
+ alts = usblp->protocol[protocol].alt_setting;
+ if (alts < 0)
+ return -EINVAL;
+
/* Don't unnecessarily set the interface if there's a single alt. */
if (usblp->intf->num_altsetting > 1) {
- alts = usblp->protocol[protocol].alt_setting;
- if (alts < 0)
- return -EINVAL;
r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
if (r < 0) {
printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4b93c0bd1d4b..21ac9b464696 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2663,13 +2663,13 @@ int usb_new_device(struct usb_device *udev)
err = sysfs_create_link(&udev->dev.kobj,
&port_dev->dev.kobj, "port");
if (err)
- goto fail;
+ goto out_del_dev;
err = sysfs_create_link(&port_dev->dev.kobj,
&udev->dev.kobj, "device");
if (err) {
sysfs_remove_link(&udev->dev.kobj, "port");
- goto fail;
+ goto out_del_dev;
}
if (!test_and_set_bit(port1, hub->child_usage_bits))
@@ -2683,6 +2683,8 @@ int usb_new_device(struct usb_device *udev)
pm_runtime_put_sync_autosuspend(&udev->dev);
return err;
+out_del_dev:
+ device_del(&udev->dev);
fail:
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
pm_runtime_disable(&udev->dev);
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 45d7af00f8d1..e857e532b35a 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -453,10 +453,11 @@ static int usb_port_runtime_suspend(struct device *dev)
static void usb_port_shutdown(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
+ struct usb_device *udev = port_dev->child;
- if (port_dev->child) {
- usb_disable_usb2_hardware_lpm(port_dev->child);
- usb_unlocked_disable_lpm(port_dev->child);
+ if (udev && !udev->port_is_suspended) {
+ usb_disable_usb2_hardware_lpm(udev);
+ usb_unlocked_disable_lpm(udev);
}
}
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index ee73789326bc..f11570c8ffd0 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -464,6 +464,7 @@
#define DWC3_DCTL_TRGTULST_SS_INACT (DWC3_DCTL_TRGTULST(6))
/* These apply for core versions 1.94a and later */
+#define DWC3_DCTL_NYET_THRES_MASK (0xf << 20)
#define DWC3_DCTL_NYET_THRES(n) (((n) & 0xf) << 20)
#define DWC3_DCTL_KEEP_CONNECT BIT(19)
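The new DWC3_DCTL_NYET_THRES_MASK lets the gadget code clear the 4-bit threshold field before OR-ing in a new value, so stale bits cannot survive the read-modify-write. The idiom in a generic form (the register and field here are illustrative, not DWC3's):

	#define EXAMPLE_FIELD_SHIFT	20
	#define EXAMPLE_FIELD_MASK	(0xf << EXAMPLE_FIELD_SHIFT)
	#define EXAMPLE_FIELD(n)	(((n) & 0xf) << EXAMPLE_FIELD_SHIFT)

	static u32 example_update_field(u32 reg, unsigned int val)
	{
		reg &= ~EXAMPLE_FIELD_MASK;	/* drop whatever the field held before */
		reg |= EXAMPLE_FIELD(val);	/* insert the new 4-bit value */
		return reg;
	}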
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index 5e3d1741701f..7d43da5f2897 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -309,6 +309,7 @@ static void dwc3_ti_remove(struct platform_device *pdev)
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_set_suspended(dev);
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 83dc7304d701..31a654c6f15b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -4195,8 +4195,10 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
WARN_ONCE(DWC3_VER_IS_PRIOR(DWC3, 240A) && dwc->has_lpm_erratum,
"LPM Erratum not available on dwc3 revisions < 2.40a\n");
- if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A))
+ if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A)) {
+ reg &= ~DWC3_DCTL_NYET_THRES_MASK;
reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
+ }
dwc3_gadget_dctl_write_safe(dwc, reg);
} else {
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 566ff0b1282a..76521555e3c1 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -211,6 +211,8 @@ config USB_F_MIDI
config USB_F_MIDI2
tristate
+ select SND_UMP
+ select SND_UMP_LEGACY_RAWMIDI
config USB_F_HID
tristate
@@ -445,8 +447,6 @@ config USB_CONFIGFS_F_MIDI2
depends on USB_CONFIGFS
depends on SND
select USB_LIBCOMPOSITE
- select SND_UMP
- select SND_UMP_LEGACY_RAWMIDI
select USB_F_MIDI2
help
The MIDI 2.0 function driver provides the generic emulated
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 6499a88d346c..fba2a56dae97 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -827,11 +827,15 @@ static ssize_t gadget_string_s_store(struct config_item *item, const char *page,
{
struct gadget_string *string = to_gadget_string(item);
int size = min(sizeof(string->string), len + 1);
+ ssize_t cpy_len;
if (len > USB_MAX_STRING_LEN)
return -EINVAL;
- return strscpy(string->string, page, size);
+ cpy_len = strscpy(string->string, page, size);
+ if (cpy_len > 0 && string->string[cpy_len - 1] == '\n')
+ string->string[cpy_len - 1] = 0;
+ return len;
}
CONFIGFS_ATTR(gadget_string_, s);
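gadget_string_s_store() now copies the value, strips the trailing newline that echo(1) normally appends, and reports the whole write as consumed. A hedged sketch of the same pattern in a generic configfs-style store callback (buffer size and names are arbitrary):

	static ssize_t example_store(struct config_item *item, const char *page,
				     size_t len)
	{
		char buf[128];
		ssize_t copied;

		copied = strscpy(buf, page, min(sizeof(buf), len + 1));
		if (copied < 0)
			return copied;		/* -E2BIG if the value did not fit */

		/* configfs writes usually end in '\n'; it is not part of the value */
		if (copied > 0 && buf[copied - 1] == '\n')
			buf[copied - 1] = '\0';

		/* ... store buf somewhere ... */
		return len;			/* consume the newline as well */
	}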
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index ad79eb0f729b..2dea9e42a0f8 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2285,7 +2285,7 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
struct usb_gadget_strings **lang;
int first_id;
- if (WARN_ON(ffs->state != FFS_ACTIVE
+ if ((ffs->state != FFS_ACTIVE
|| test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
return -EBADFD;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index ce5b77f89190..9b324821c93b 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1185,6 +1185,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
uac2->as_in_alt = 0;
}
+ std_ac_if_desc.bNumEndpoints = 0;
if (FUOUT_EN(uac2_opts) || FUIN_EN(uac2_opts)) {
uac2->int_ep = usb_ep_autoconfig(gadget, &fs_ep_int_desc);
if (!uac2->int_ep) {
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 53d9fc41acc5..bc143a86c2dd 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -1420,6 +1420,10 @@ void gserial_disconnect(struct gserial *gser)
/* REVISIT as above: how best to track this? */
port->port_line_coding = gser->port_line_coding;
+ /* disable endpoints, aborting down any active I/O */
+ usb_ep_disable(gser->out);
+ usb_ep_disable(gser->in);
+
port->port_usb = NULL;
gser->ioport = NULL;
if (port->port.count > 0) {
@@ -1431,10 +1435,6 @@ void gserial_disconnect(struct gserial *gser)
spin_unlock(&port->port_lock);
spin_unlock_irqrestore(&serial_port_lock, flags);
- /* disable endpoints, aborting down any active I/O */
- usb_ep_disable(gser->out);
- usb_ep_disable(gser->in);
-
/* finally, free any unused/unusable I/O buffers */
spin_lock_irqsave(&port->port_lock, flags);
if (port->port.count == 0)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index e6c9006bd568..db109b570c5c 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -290,7 +290,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
- if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
+ if ((priv && (priv->quirks & XHCI_SKIP_PHY_INIT)) ||
+ (xhci->quirks & XHCI_SKIP_PHY_INIT))
hcd->skip_phy_initialization = 1;
if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK))
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index c24101f0a07a..9960ac2b10b7 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -223,6 +223,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ { USB_DEVICE(0x1B93, 0x1013) }, /* Phoenix Contact UPS Device */
{ USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
{ USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 64317b390d22..1e2ae0c6c41c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -621,7 +621,7 @@ static void option_instat_callback(struct urb *urb);
/* MeiG Smart Technology products */
#define MEIGSMART_VENDOR_ID 0x2dee
-/* MeiG Smart SRM825L based on Qualcomm 315 */
+/* MeiG Smart SRM815/SRM825L based on Qualcomm 315 */
#define MEIGSMART_PRODUCT_SRM825L 0x4d22
/* MeiG Smart SLM320 based on UNISOC UIS8910 */
#define MEIGSMART_PRODUCT_SLM320 0x4d41
@@ -2405,6 +2405,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM770A, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
@@ -2412,6 +2413,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(1) },
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0640, 0xff), /* TCL IK512 ECM */
.driver_info = NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2949, 0x8700, 0xff) }, /* Neoway N723-EA */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index e5ad23d86833..54f0b1c83317 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -255,6 +255,13 @@ UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 ),
+/* Added by Lubomir Rintel <lkundrak@v3.sk>, a very fine chap */
+UNUSUAL_DEV( 0x0421, 0x06c2, 0x0000, 0x0406,
+ "Nokia",
+ "Nokia 208",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
#ifdef NO_SDDR09
UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100,
"Microtech",
diff --git a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c
index 22163d8f9eb0..0cdda06592fd 100644
--- a/drivers/usb/typec/tcpm/maxim_contaminant.c
+++ b/drivers/usb/typec/tcpm/maxim_contaminant.c
@@ -135,7 +135,7 @@ static int max_contaminant_read_resistance_kohm(struct max_tcpci_chip *chip,
mv = max_contaminant_read_adc_mv(chip, channel, sleep_msec, raw, true);
if (mv < 0)
- return ret;
+ return mv;
/* OVP enable */
ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCOVPDIS, 0);
@@ -157,7 +157,7 @@ static int max_contaminant_read_resistance_kohm(struct max_tcpci_chip *chip,
mv = max_contaminant_read_adc_mv(chip, channel, sleep_msec, raw, true);
if (mv < 0)
- return ret;
+ return mv;
/* Disable current source */
ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBURPCTRL, 0);
if (ret < 0)
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index ed32583829be..24a6a4354df8 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -700,7 +700,7 @@ static int tcpci_init(struct tcpc_dev *tcpc)
tcpci->alert_mask = reg;
- return tcpci_write16(tcpci, TCPC_ALERT_MASK, reg);
+ return 0;
}
irqreturn_t tcpci_irq(struct tcpci *tcpci)
@@ -923,22 +923,27 @@ static int tcpci_probe(struct i2c_client *client)
chip->data.set_orientation = err;
+ chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
+ if (IS_ERR(chip->tcpci))
+ return PTR_ERR(chip->tcpci);
+
err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
_tcpci_irq,
IRQF_SHARED | IRQF_ONESHOT,
dev_name(&client->dev), chip);
if (err < 0)
- return err;
+ goto unregister_port;
- /*
- * Disable irq while registering port. If irq is configured as an edge
- * irq this allow to keep track and process the irq as soon as it is enabled.
- */
- disable_irq(client->irq);
- chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
- enable_irq(client->irq);
+ /* Enable chip interrupts at last */
+ err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, chip->tcpci->alert_mask);
+ if (err < 0)
+ goto unregister_port;
- return PTR_ERR_OR_ZERO(chip->tcpci);
+ return 0;
+
+unregister_port:
+ tcpci_unregister_port(chip->tcpci);
+ return err;
}
static void tcpci_remove(struct i2c_client *client)
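The reworked tcpci_probe() registers the port first, then requests the threaded IRQ, and unmasks the chip's interrupt sources only once the handler can run, unwinding the registration on any failure. Roughly, with placeholder helpers:

	static int example_probe(struct i2c_client *client)
	{
		struct example_chip *chip;
		int err;

		chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->port = example_register_port(&client->dev, chip);
		if (IS_ERR(chip->port))
			return PTR_ERR(chip->port);

		err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
						example_irq_thread,
						IRQF_SHARED | IRQF_ONESHOT,
						dev_name(&client->dev), chip);
		if (err < 0)
			goto unregister;

		/* Only now let the hardware raise interrupts. */
		err = example_write_irq_mask(chip, chip->irq_mask);
		if (err < 0)
			goto unregister;

		return 0;

	unregister:
		example_unregister_port(chip->port);
		return err;
	}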
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index fcb8e61136cf..740171f24ef9 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -646,7 +646,7 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
UCSI_CMD_CONNECTOR_MASK;
if (con_index == 0) {
ret = -EINVAL;
- goto unlock;
+ goto err_put;
}
con = &uc->ucsi->connector[con_index - 1];
ucsi_ccg_update_set_new_cam_cmd(uc, con, &command);
@@ -654,8 +654,8 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
ret = ucsi_sync_control_common(ucsi, command);
+err_put:
pm_runtime_put_sync(uc->dev);
-unlock:
mutex_unlock(&uc->lock);
return ret;
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index 90948cd6d297..fed39d458090 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -185,6 +185,11 @@ static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con)
struct pmic_glink_ucsi *ucsi = ucsi_get_drvdata(con->ucsi);
int orientation;
+ if (!UCSI_CONSTAT(con, CONNECTED)) {
+ typec_set_orientation(con->port, TYPEC_ORIENTATION_NONE);
+ return;
+ }
+
if (con->num > PMIC_GLINK_MAX_PORTS ||
!ucsi->port_orientation[con->num - 1])
return;
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 1ab58da9f38a..1a4ed5a357d3 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1661,14 +1661,15 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
vm_fault_t ret = VM_FAULT_SIGBUS;
- if (order && (vmf->address & ((PAGE_SIZE << order) - 1) ||
+ pfn = vma_to_pfn(vma) + pgoff;
+
+ if (order && (pfn & ((1 << order) - 1) ||
+ vmf->address & ((PAGE_SIZE << order) - 1) ||
vmf->address + (PAGE_SIZE << order) > vma->vm_end)) {
ret = VM_FAULT_FALLBACK;
goto out;
}
- pfn = vma_to_pfn(vma);
-
down_read(&vdev->memory_lock);
if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
@@ -1676,18 +1677,18 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
switch (order) {
case 0:
- ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
break;
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
case PMD_ORDER:
- ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn + pgoff,
- PFN_DEV), false);
+ ret = vmf_insert_pfn_pmd(vmf,
+ __pfn_to_pfn_t(pfn, PFN_DEV), false);
break;
#endif
#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
case PUD_ORDER:
- ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn + pgoff,
- PFN_DEV), false);
+ ret = vmf_insert_pfn_pud(vmf,
+ __pfn_to_pfn_t(pfn, PFN_DEV), false);
break;
#endif
default:
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 819c75233235..3bc9ce6c575e 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -57,6 +57,8 @@ static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
int err, len;
len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
+ if (len > 0)
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
netfs_write_subrequest_terminated(subreq, len ?: err, false);
}
@@ -80,8 +82,10 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
if (pos + total >= i_size_read(rreq->inode))
__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
- if (!err)
+ if (!err) {
subreq->transferred += total;
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ }
netfs_read_subreq_terminated(subreq, err, false);
}
diff --git a/fs/afs/addr_prefs.c b/fs/afs/addr_prefs.c
index a189ff8a5034..c0384201b8fe 100644
--- a/fs/afs/addr_prefs.c
+++ b/fs/afs/addr_prefs.c
@@ -413,8 +413,10 @@ int afs_proc_addr_prefs_write(struct file *file, char *buf, size_t size)
do {
argc = afs_split_string(&buf, argv, ARRAY_SIZE(argv));
- if (argc < 0)
- return argc;
+ if (argc < 0) {
+ ret = argc;
+ goto done;
+ }
if (argc < 2)
goto inval;
diff --git a/fs/afs/afs.h b/fs/afs/afs.h
index b488072aee87..ec3db00bd081 100644
--- a/fs/afs/afs.h
+++ b/fs/afs/afs.h
@@ -10,7 +10,7 @@
#include <linux/in.h>
-#define AFS_MAXCELLNAME 256 /* Maximum length of a cell name */
+#define AFS_MAXCELLNAME 253 /* Maximum length of a cell name (DNS limited) */
#define AFS_MAXVOLNAME 64 /* Maximum length of a volume name */
#define AFS_MAXNSERVERS 8 /* Maximum servers in a basic volume record */
#define AFS_NMAXNSERVERS 13 /* Maximum servers in a N/U-class volume record */
diff --git a/fs/afs/afs_vl.h b/fs/afs/afs_vl.h
index a06296c8827d..b835e25a2c02 100644
--- a/fs/afs/afs_vl.h
+++ b/fs/afs/afs_vl.h
@@ -13,6 +13,7 @@
#define AFS_VL_PORT 7003 /* volume location service port */
#define VL_SERVICE 52 /* RxRPC service ID for the Volume Location service */
#define YFS_VL_SERVICE 2503 /* Service ID for AuriStor upgraded VL service */
+#define YFS_VL_MAXCELLNAME 256 /* Maximum length of a cell name in YFS protocol */
enum AFSVL_Operations {
VLGETENTRYBYID = 503, /* AFS Get VLDB entry by ID */
diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
index 9f36e14f1c2d..f9e76b604f31 100644
--- a/fs/afs/vl_alias.c
+++ b/fs/afs/vl_alias.c
@@ -253,6 +253,7 @@ static char *afs_vl_get_cell_name(struct afs_cell *cell, struct key *key)
static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
{
struct afs_cell *master;
+ size_t name_len;
char *cell_name;
cell_name = afs_vl_get_cell_name(cell, key);
@@ -264,8 +265,11 @@ static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
return 0;
}
- master = afs_lookup_cell(cell->net, cell_name, strlen(cell_name),
- NULL, false);
+ name_len = strlen(cell_name);
+ if (!name_len || name_len > AFS_MAXCELLNAME)
+ master = ERR_PTR(-EOPNOTSUPP);
+ else
+ master = afs_lookup_cell(cell->net, cell_name, name_len, NULL, false);
kfree(cell_name);
if (IS_ERR(master))
return PTR_ERR(master);
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index cac75f89b64a..55dd0fc5aad7 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -697,7 +697,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call)
return ret;
namesz = ntohl(call->tmp);
- if (namesz > AFS_MAXCELLNAME)
+ if (namesz > YFS_VL_MAXCELLNAME)
return afs_protocol_error(call, afs_eproto_cellname_len);
paddedsz = (namesz + 3) & ~3;
call->count = namesz;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 34107b55f834..ccb6aa8027c5 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -122,7 +122,7 @@ static void afs_issue_write_worker(struct work_struct *work)
if (subreq->debug_index == 3)
return netfs_write_subrequest_terminated(subreq, -ENOANO, false);
- if (!test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) {
+ if (!subreq->retry_count) {
set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
}
@@ -149,6 +149,9 @@ static void afs_issue_write_worker(struct work_struct *work)
afs_wait_for_operation(op);
ret = afs_put_operation(op);
switch (ret) {
+ case 0:
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ break;
case -EACCES:
case -EPERM:
case -ENOKEY:
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 3af8bb0c8d75..4d9305fa37a8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4878,25 +4878,29 @@ out_fail:
return ret;
}
+struct btrfs_uring_encoded_data {
+ struct btrfs_ioctl_encoded_io_args args;
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov;
+ struct iov_iter iter;
+};
+
static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args, flags);
size_t copy_end;
- struct btrfs_ioctl_encoded_io_args args = { 0 };
int ret;
u64 disk_bytenr, disk_io_size;
struct file *file;
struct btrfs_inode *inode;
struct btrfs_fs_info *fs_info;
struct extent_io_tree *io_tree;
- struct iovec iovstack[UIO_FASTIOV];
- struct iovec *iov = iovstack;
- struct iov_iter iter;
loff_t pos;
struct kiocb kiocb;
struct extent_state *cached_state = NULL;
u64 start, lockend;
void __user *sqe_addr;
+ struct btrfs_uring_encoded_data *data = io_uring_cmd_get_async_data(cmd)->op_data;
if (!capable(CAP_SYS_ADMIN)) {
ret = -EPERM;
@@ -4910,43 +4914,64 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
if (issue_flags & IO_URING_F_COMPAT) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
- struct btrfs_ioctl_encoded_io_args_32 args32;
-
copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32, flags);
- if (copy_from_user(&args32, sqe_addr, copy_end)) {
- ret = -EFAULT;
- goto out_acct;
- }
- args.iov = compat_ptr(args32.iov);
- args.iovcnt = args32.iovcnt;
- args.offset = args32.offset;
- args.flags = args32.flags;
#else
return -ENOTTY;
#endif
} else {
copy_end = copy_end_kernel;
- if (copy_from_user(&args, sqe_addr, copy_end)) {
- ret = -EFAULT;
+ }
+
+ if (!data) {
+ data = kzalloc(sizeof(*data), GFP_NOFS);
+ if (!data) {
+ ret = -ENOMEM;
goto out_acct;
}
- }
- if (args.flags != 0)
- return -EINVAL;
+ io_uring_cmd_get_async_data(cmd)->op_data = data;
- ret = import_iovec(ITER_DEST, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
- &iov, &iter);
- if (ret < 0)
- goto out_acct;
+ if (issue_flags & IO_URING_F_COMPAT) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ struct btrfs_ioctl_encoded_io_args_32 args32;
- if (iov_iter_count(&iter) == 0) {
- ret = 0;
- goto out_free;
+ if (copy_from_user(&args32, sqe_addr, copy_end)) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+
+ data->args.iov = compat_ptr(args32.iov);
+ data->args.iovcnt = args32.iovcnt;
+ data->args.offset = args32.offset;
+ data->args.flags = args32.flags;
+#endif
+ } else {
+ if (copy_from_user(&data->args, sqe_addr, copy_end)) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+ }
+
+ if (data->args.flags != 0) {
+ ret = -EINVAL;
+ goto out_acct;
+ }
+
+ data->iov = data->iovstack;
+ ret = import_iovec(ITER_DEST, data->args.iov, data->args.iovcnt,
+ ARRAY_SIZE(data->iovstack), &data->iov,
+ &data->iter);
+ if (ret < 0)
+ goto out_acct;
+
+ if (iov_iter_count(&data->iter) == 0) {
+ ret = 0;
+ goto out_free;
+ }
}
- pos = args.offset;
- ret = rw_verify_area(READ, file, &pos, args.len);
+ pos = data->args.offset;
+ ret = rw_verify_area(READ, file, &pos, data->args.len);
if (ret < 0)
goto out_free;
@@ -4959,15 +4984,16 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
start = ALIGN_DOWN(pos, fs_info->sectorsize);
lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
- ret = btrfs_encoded_read(&kiocb, &iter, &args, &cached_state,
+ ret = btrfs_encoded_read(&kiocb, &data->iter, &data->args, &cached_state,
&disk_bytenr, &disk_io_size);
if (ret < 0 && ret != -EIOCBQUEUED)
goto out_free;
file_accessed(file);
- if (copy_to_user(sqe_addr + copy_end, (const char *)&args + copy_end_kernel,
- sizeof(args) - copy_end_kernel)) {
+ if (copy_to_user(sqe_addr + copy_end,
+ (const char *)&data->args + copy_end_kernel,
+ sizeof(data->args) - copy_end_kernel)) {
if (ret == -EIOCBQUEUED) {
unlock_extent(io_tree, start, lockend, &cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
@@ -4977,40 +5003,22 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
}
if (ret == -EIOCBQUEUED) {
- u64 count;
-
- /*
- * If we've optimized things by storing the iovecs on the stack,
- * undo this.
- */
- if (!iov) {
- iov = kmalloc(sizeof(struct iovec) * args.iovcnt, GFP_NOFS);
- if (!iov) {
- unlock_extent(io_tree, start, lockend, &cached_state);
- btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
- ret = -ENOMEM;
- goto out_acct;
- }
-
- memcpy(iov, iovstack, sizeof(struct iovec) * args.iovcnt);
- }
-
- count = min_t(u64, iov_iter_count(&iter), disk_io_size);
+ u64 count = min_t(u64, iov_iter_count(&data->iter), disk_io_size);
/* Match ioctl by not returning past EOF if uncompressed. */
- if (!args.compression)
- count = min_t(u64, count, args.len);
+ if (!data->args.compression)
+ count = min_t(u64, count, data->args.len);
- ret = btrfs_uring_read_extent(&kiocb, &iter, start, lockend,
- cached_state, disk_bytenr,
- disk_io_size, count,
- args.compression, iov, cmd);
+ ret = btrfs_uring_read_extent(&kiocb, &data->iter, start, lockend,
+ cached_state, disk_bytenr, disk_io_size,
+ count, data->args.compression,
+ data->iov, cmd);
goto out_acct;
}
out_free:
- kfree(iov);
+ kfree(data->iov);
out_acct:
if (ret > 0)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 204c928beaf9..531312efee8d 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1541,6 +1541,10 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
u64 extent_gen;
int ret;
+ if (unlikely(!extent_root)) {
+ btrfs_err(fs_info, "no valid extent root for scrub");
+ return -EUCLEAN;
+ }
memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
stripe->nr_sectors);
scrub_stripe_reset_bitmaps(stripe);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 1cccaf9c2b0d..3d0ac8bdb21f 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -797,6 +797,10 @@ static int get_canonical_dev_path(const char *dev_path, char *canonical)
if (ret)
goto out;
resolved_path = d_path(&path, path_buf, PATH_MAX);
+ if (IS_ERR(resolved_path)) {
+ ret = PTR_ERR(resolved_path);
+ goto out;
+ }
ret = strscpy(canonical, resolved_path, PATH_MAX);
out:
kfree(path_buf);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index ddf0d5a448a7..c9e92c6941ec 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -174,10 +174,10 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
copy_page(workspace->buf + i * PAGE_SIZE,
data_in);
start += PAGE_SIZE;
- workspace->strm.avail_in =
- (in_buf_folios << PAGE_SHIFT);
}
workspace->strm.next_in = workspace->buf;
+ workspace->strm.avail_in = min(bytes_left,
+ in_buf_folios << PAGE_SHIFT);
} else {
unsigned int pg_off;
unsigned int cur_len;
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 11ed523e528e..df905ae82929 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -748,8 +748,9 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
(u64)lim->max_segments << PAGE_SHIFT),
fs_info->sectorsize);
fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
- if (fs_info->max_zone_append_size < fs_info->max_extent_size)
- fs_info->max_extent_size = fs_info->max_zone_append_size;
+
+ fs_info->max_extent_size = min_not_zero(fs_info->max_extent_size,
+ fs_info->max_zone_append_size);
/*
* Check mount options here, because we might change fs_info->zoned
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 89b11336a836..1806bff8e59b 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -15,6 +15,7 @@
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/mount.h>
+#include <linux/security.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
#include <linux/string.h>
@@ -576,7 +577,7 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
*/
static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
{
- char *secctx;
+ int err;
_enter(",%s", args);
@@ -585,16 +586,16 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
return -EINVAL;
}
- if (cache->secctx) {
+ if (cache->have_secid) {
pr_err("Second security context specified\n");
return -EINVAL;
}
- secctx = kstrdup(args, GFP_KERNEL);
- if (!secctx)
- return -ENOMEM;
+ err = security_secctx_to_secid(args, strlen(args), &cache->secid);
+ if (err)
+ return err;
- cache->secctx = secctx;
+ cache->have_secid = true;
return 0;
}
@@ -820,7 +821,6 @@ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
put_cred(cache->cache_cred);
kfree(cache->rootdirname);
- kfree(cache->secctx);
kfree(cache->tag);
_leave("");
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 7b99bd98de75..38c236e38cef 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -122,7 +122,6 @@ struct cachefiles_cache {
#define CACHEFILES_STATE_CHANGED 3 /* T if state changed (poll trigger) */
#define CACHEFILES_ONDEMAND_MODE 4 /* T if in on-demand read mode */
char *rootdirname; /* name of cache root directory */
- char *secctx; /* LSM security context */
char *tag; /* cache binding tag */
refcount_t unbind_pincount;/* refcount to do daemon unbind */
struct xarray reqs; /* xarray of pending on-demand requests */
@@ -130,6 +129,8 @@ struct cachefiles_cache {
struct xarray ondemand_ids; /* xarray for ondemand_id allocation */
u32 ondemand_id_next;
u32 msg_id_next;
+ u32 secid; /* LSM security id */
+ bool have_secid; /* whether "secid" was set */
};
static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
diff --git a/fs/cachefiles/security.c b/fs/cachefiles/security.c
index fe777164f1d8..fc6611886b3b 100644
--- a/fs/cachefiles/security.c
+++ b/fs/cachefiles/security.c
@@ -18,7 +18,7 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache)
struct cred *new;
int ret;
- _enter("{%s}", cache->secctx);
+ _enter("{%u}", cache->have_secid ? cache->secid : 0);
new = prepare_kernel_cred(current);
if (!new) {
@@ -26,8 +26,8 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache)
goto error;
}
- if (cache->secctx) {
- ret = set_security_override_from_ctx(new, cache->secctx);
+ if (cache->have_secid) {
+ ret = set_security_override(new, cache->secid);
if (ret < 0) {
put_cred(new);
pr_err("Security denies permission to nominate security context: error %d\n",
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 47dc96dfe386..16e198a26339 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -64,22 +64,13 @@ const struct file_operations *debugfs_real_fops(const struct file *filp)
}
EXPORT_SYMBOL_GPL(debugfs_real_fops);
-/**
- * debugfs_file_get - mark the beginning of file data access
- * @dentry: the dentry object whose data is being accessed.
- *
- * Up to a matching call to debugfs_file_put(), any successive call
- * into the file removing functions debugfs_remove() and
- * debugfs_remove_recursive() will block. Since associated private
- * file data may only get freed after a successful return of any of
- * the removal functions, you may safely access it after a successful
- * call to debugfs_file_get() without worrying about lifetime issues.
- *
- * If -%EIO is returned, the file has already been removed and thus,
- * it is not safe to access any of its data. If, on the other hand,
- * it is allowed to access the file data, zero is returned.
- */
-int debugfs_file_get(struct dentry *dentry)
+enum dbgfs_get_mode {
+ DBGFS_GET_ALREADY,
+ DBGFS_GET_REGULAR,
+ DBGFS_GET_SHORT,
+};
+
+static int __debugfs_file_get(struct dentry *dentry, enum dbgfs_get_mode mode)
{
struct debugfs_fsdata *fsd;
void *d_fsd;
@@ -96,15 +87,17 @@ int debugfs_file_get(struct dentry *dentry)
if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) {
fsd = d_fsd;
} else {
+ if (WARN_ON(mode == DBGFS_GET_ALREADY))
+ return -EINVAL;
+
fsd = kmalloc(sizeof(*fsd), GFP_KERNEL);
if (!fsd)
return -ENOMEM;
- if ((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT) {
+ if (mode == DBGFS_GET_SHORT) {
fsd->real_fops = NULL;
fsd->short_fops = (void *)((unsigned long)d_fsd &
- ~(DEBUGFS_FSDATA_IS_REAL_FOPS_BIT |
- DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT));
+ ~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
} else {
fsd->real_fops = (void *)((unsigned long)d_fsd &
~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
@@ -138,6 +131,26 @@ int debugfs_file_get(struct dentry *dentry)
return 0;
}
+
+/**
+ * debugfs_file_get - mark the beginning of file data access
+ * @dentry: the dentry object whose data is being accessed.
+ *
+ * Up to a matching call to debugfs_file_put(), any successive call
+ * into the file removing functions debugfs_remove() and
+ * debugfs_remove_recursive() will block. Since associated private
+ * file data may only get freed after a successful return of any of
+ * the removal functions, you may safely access it after a successful
+ * call to debugfs_file_get() without worrying about lifetime issues.
+ *
+ * If -%EIO is returned, the file has already been removed and thus,
+ * it is not safe to access any of its data. If, on the other hand,
+ * it is allowed to access the file data, zero is returned.
+ */
+int debugfs_file_get(struct dentry *dentry)
+{
+ return __debugfs_file_get(dentry, DBGFS_GET_ALREADY);
+}
EXPORT_SYMBOL_GPL(debugfs_file_get);
/**
@@ -267,7 +280,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
const struct file_operations *real_fops = NULL;
int r;
- r = debugfs_file_get(dentry);
+ r = __debugfs_file_get(dentry, DBGFS_GET_REGULAR);
if (r)
return r == -EIO ? -ENOENT : r;
@@ -424,7 +437,8 @@ static void __full_proxy_fops_init(struct file_operations *proxy_fops,
proxy_fops->unlocked_ioctl = full_proxy_unlocked_ioctl;
}
-static int full_proxy_open(struct inode *inode, struct file *filp)
+static int full_proxy_open(struct inode *inode, struct file *filp,
+ enum dbgfs_get_mode mode)
{
struct dentry *dentry = F_DENTRY(filp);
const struct file_operations *real_fops;
@@ -432,7 +446,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
struct debugfs_fsdata *fsd;
int r;
- r = debugfs_file_get(dentry);
+ r = __debugfs_file_get(dentry, mode);
if (r)
return r == -EIO ? -ENOENT : r;
@@ -491,8 +505,22 @@ out:
return r;
}
+static int full_proxy_open_regular(struct inode *inode, struct file *filp)
+{
+ return full_proxy_open(inode, filp, DBGFS_GET_REGULAR);
+}
+
const struct file_operations debugfs_full_proxy_file_operations = {
- .open = full_proxy_open,
+ .open = full_proxy_open_regular,
+};
+
+static int full_proxy_open_short(struct inode *inode, struct file *filp)
+{
+ return full_proxy_open(inode, filp, DBGFS_GET_SHORT);
+}
+
+const struct file_operations debugfs_full_short_proxy_file_operations = {
+ .open = full_proxy_open_short,
};
ssize_t debugfs_attr_read(struct file *file, char __user *buf,
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 38a9c7eb97e6..e752009de929 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -229,7 +229,7 @@ static void debugfs_release_dentry(struct dentry *dentry)
return;
/* check it wasn't a dir (no fsdata) or automount (no real_fops) */
- if (fsd && fsd->real_fops) {
+ if (fsd && (fsd->real_fops || fsd->short_fops)) {
WARN_ON(!list_empty(&fsd->cancellations));
mutex_destroy(&fsd->cancellations_mtx);
}
@@ -455,8 +455,7 @@ struct dentry *debugfs_create_file_full(const char *name, umode_t mode,
const struct file_operations *fops)
{
if (WARN_ON((unsigned long)fops &
- (DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT |
- DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)))
+ DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))
return ERR_PTR(-EINVAL);
return __debugfs_create_file(name, mode, parent, data,
@@ -471,15 +470,13 @@ struct dentry *debugfs_create_file_short(const char *name, umode_t mode,
const struct debugfs_short_fops *fops)
{
if (WARN_ON((unsigned long)fops &
- (DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT |
- DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)))
+ DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))
return ERR_PTR(-EINVAL);
return __debugfs_create_file(name, mode, parent, data,
- fops ? &debugfs_full_proxy_file_operations :
+ fops ? &debugfs_full_short_proxy_file_operations :
&debugfs_noop_file_operations,
- (const void *)((unsigned long)fops |
- DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT));
+ fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_file_short);
diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
index a3edfa4f0d8e..bbae4a228ef4 100644
--- a/fs/debugfs/internal.h
+++ b/fs/debugfs/internal.h
@@ -15,6 +15,7 @@ struct file_operations;
extern const struct file_operations debugfs_noop_file_operations;
extern const struct file_operations debugfs_open_proxy_file_operations;
extern const struct file_operations debugfs_full_proxy_file_operations;
+extern const struct file_operations debugfs_full_short_proxy_file_operations;
struct debugfs_fsdata {
const struct file_operations *real_fops;
@@ -40,11 +41,6 @@ struct debugfs_fsdata {
* pointer gets its lowest bit set.
*/
#define DEBUGFS_FSDATA_IS_REAL_FOPS_BIT BIT(0)
-/*
- * A dentry's ->d_fsdata, when pointing to real fops, is with
- * short fops instead of full fops.
- */
-#define DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT BIT(1)
/* Access BITS */
#define DEBUGFS_ALLOW_API BIT(0)
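The dropped DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT relied on tagging the low bits of an aligned pointer; the rework replaces that second tag with an explicit open mode. For reference, a self-contained sketch of the pointer-tagging idiom itself (plain userspace C, nothing debugfs-specific):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TAG_BIT 0x1UL	/* safe because the tagged objects are at least 2-byte aligned */

	static void *tag_ptr(void *p)    { return (void *)((uintptr_t)p | TAG_BIT); }
	static int   is_tagged(void *p)  { return (uintptr_t)p & TAG_BIT; }
	static void *untag_ptr(void *p)  { return (void *)((uintptr_t)p & ~TAG_BIT); }

	int main(void)
	{
		static int object = 42;
		void *tagged = tag_ptr(&object);

		assert(is_tagged(tagged) && !is_tagged(&object));
		printf("%d\n", *(int *)untag_ptr(tagged));	/* prints 42 */
		return 0;
	}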
diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
index fe0a9b8a0cd0..3103b932b674 100644
--- a/fs/exfat/dir.c
+++ b/fs/exfat/dir.c
@@ -122,7 +122,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
type = exfat_get_entry_type(ep);
if (type == TYPE_UNUSED) {
brelse(bh);
- break;
+ goto out;
}
if (type != TYPE_FILE && type != TYPE_DIR) {
@@ -170,6 +170,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
}
}
+out:
dir_entry->namebuf.lfn[0] = '\0';
*cpos = EXFAT_DEN_TO_B(dentry);
return 0;
diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
index 773c320d68f3..9e5492ac409b 100644
--- a/fs/exfat/fatent.c
+++ b/fs/exfat/fatent.c
@@ -216,6 +216,16 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
if (err)
goto dec_used_clus;
+
+ if (num_clusters >= sbi->num_clusters - EXFAT_FIRST_CLUSTER) {
+ /*
+ * The cluster chain includes a loop, scan the
+ * bitmap to get the number of used clusters.
+ */
+ exfat_count_used_clusters(sb, &sbi->used_clusters);
+
+ return 0;
+ }
} while (clu != EXFAT_EOF_CLUSTER);
}
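__exfat_free_cluster() now treats freeing more clusters than the volume holds as proof that the FAT chain loops, and falls back to recounting from the allocation bitmap. The bound-the-walk idea in a standalone sketch: a valid acyclic chain can never visit more nodes than exist in total.

	#include <stdbool.h>
	#include <stddef.h>

	struct node {
		struct node *next;
	};

	/* Returns true if walking the chain visits more nodes than can exist. */
	static bool chain_has_loop(const struct node *head, size_t total_nodes)
	{
		size_t visited = 0;

		for (const struct node *n = head; n; n = n->next) {
			if (++visited > total_nodes)
				return true;	/* impossible count => cycle */
		}
		return false;
	}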
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index fb38769c3e39..05b51e721783 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -545,6 +545,7 @@ static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
while (pos < new_valid_size) {
u32 len;
struct folio *folio;
+ unsigned long off;
len = PAGE_SIZE - (pos & (PAGE_SIZE - 1));
if (pos + len > new_valid_size)
@@ -554,6 +555,9 @@ static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
if (err)
goto out;
+ off = offset_in_folio(folio, pos);
+ folio_zero_new_buffers(folio, off, off + len);
+
err = ops->write_end(file, mapping, pos, len, len, folio, NULL);
if (err < 0)
goto out;
@@ -563,6 +567,8 @@ static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
cond_resched();
}
+ return 0;
+
out:
return err;
}
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 97d2774760fe..099f80645072 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -330,8 +330,8 @@ static int exfat_find_empty_entry(struct inode *inode,
while ((dentry = exfat_search_empty_slot(sb, &hint_femp, p_dir,
num_entries, es)) < 0) {
- if (dentry == -EIO)
- break;
+ if (dentry != -ENOSPC)
+ return dentry;
if (exfat_check_max_dentries(inode))
return -ENOSPC;
diff --git a/fs/file.c b/fs/file.c
index fb1011cf6b4a..25c6e53b03f8 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -22,6 +22,7 @@
#include <linux/close_range.h>
#include <linux/file_ref.h>
#include <net/sock.h>
+#include <linux/init_task.h>
#include "internal.h"
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 494ac372ace0..e540d05549ff 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1681,6 +1681,8 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
*/
if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE))
nonseekable_open(inode, file);
+ if (!(ff->open_flags & FOPEN_KEEP_CACHE))
+ invalidate_inode_pages2(inode->i_mapping);
}
return err;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 88d0946b5bc9..7d92a5479998 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1541,8 +1541,10 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
*/
struct page **pages = kzalloc(max_pages * sizeof(struct page *),
GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ if (!pages) {
+ ret = -ENOMEM;
+ goto out;
+ }
while (nbytes < *nbytesp && nr_pages < max_pages) {
unsigned nfolios, i;
@@ -1557,18 +1559,22 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
nbytes += ret;
- ret += start;
- /* Currently, all folios in FUSE are one page */
- nfolios = DIV_ROUND_UP(ret, PAGE_SIZE);
+ nfolios = DIV_ROUND_UP(ret + start, PAGE_SIZE);
+
+ for (i = 0; i < nfolios; i++) {
+ struct folio *folio = page_folio(pages[i]);
+ unsigned int offset = start +
+ (folio_page_idx(folio, pages[i]) << PAGE_SHIFT);
+ unsigned int len = min_t(unsigned int, ret, PAGE_SIZE - start);
- ap->descs[ap->num_folios].offset = start;
- fuse_folio_descs_length_init(ap->descs, ap->num_folios, nfolios);
- for (i = 0; i < nfolios; i++)
- ap->folios[i + ap->num_folios] = page_folio(pages[i]);
+ ap->descs[ap->num_folios].offset = offset;
+ ap->descs[ap->num_folios].length = len;
+ ap->folios[ap->num_folios] = folio;
+ start = 0;
+ ret -= len;
+ ap->num_folios++;
+ }
- ap->num_folios += nfolios;
- ap->descs[ap->num_folios - 1].length -=
- (PAGE_SIZE - ret) & (PAGE_SIZE - 1);
nr_pages += nfolios;
}
kfree(pages);
@@ -1584,6 +1590,7 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
else
ap->args.out_pages = true;
+out:
*nbytesp = nbytes;
return ret < 0 ? ret : 0;
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 3bee9b5dba5e..fe09c2093a93 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -349,11 +349,13 @@ static int hfs_fill_super(struct super_block *sb, struct fs_context *fc)
goto bail_no_root;
res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
if (!res) {
- if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
+ if (fd.entrylength != sizeof(rec.dir)) {
res = -EIO;
goto bail_hfs_find;
}
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
+ if (rec.type != HFS_CDR_DIR)
+ res = -EIO;
}
if (res)
goto bail_hfs_find;
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 955f19e27e47..d303e6c8900c 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1138,7 +1138,7 @@ static void iomap_write_delalloc_scan(struct inode *inode,
start_byte, end_byte, iomap, punch);
/* move offset to start of next folio in range */
- start_byte = folio_next_index(folio) << PAGE_SHIFT;
+ start_byte = folio_pos(folio) + folio_size(folio);
folio_unlock(folio);
folio_put(folio);
}
@@ -1774,7 +1774,8 @@ static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos)
*/
static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct folio *folio,
- struct inode *inode, loff_t pos, unsigned len)
+ struct inode *inode, loff_t pos, loff_t end_pos,
+ unsigned len)
{
struct iomap_folio_state *ifs = folio->private;
size_t poff = offset_in_folio(folio, pos);
@@ -1793,15 +1794,60 @@ new_ioend:
if (ifs)
atomic_add(len, &ifs->write_bytes_pending);
+
+ /*
+ * Clamp io_offset and io_size to the incore EOF so that ondisk
+ * file size updates in the ioend completion are byte-accurate.
+ * This avoids recovering files with zeroed tail regions when
+ * writeback races with appending writes:
+ *
+ * Thread 1: Thread 2:
+ * ------------ -----------
+ * write [A, A+B]
+ * update inode size to A+B
+ * submit I/O [A, A+BS]
+ * write [A+B, A+B+C]
+ * update inode size to A+B+C
+ * <I/O completes, updates disk size to min(A+B+C, A+BS)>
+ * <power failure>
+ *
+ * After reboot:
+ * 1) with A+B+C < A+BS, the file has zero padding in range
+ * [A+B, A+B+C]
+ *
+ * |< Block Size (BS) >|
+ * |DDDDDDDDDDDD0000000000000|
+ * ^ ^ ^
+ * A A+B A+B+C
+ * (EOF)
+ *
+ * 2) with A+B+C > A+BS, the file has zero padding in range
+ * [A+B, A+BS]
+ *
+ * |< Block Size (BS) >|< Block Size (BS) >|
+ * |DDDDDDDDDDDD0000000000000|00000000000000000000000000|
+ * ^ ^ ^ ^
+ * A A+B A+BS A+B+C
+ * (EOF)
+ *
+ * D = Valid Data
+ * 0 = Zero Padding
+ *
+ * Note that this defeats the ability to chain the ioends of
+ * appending writes.
+ */
wpc->ioend->io_size += len;
+ if (wpc->ioend->io_offset + wpc->ioend->io_size > end_pos)
+ wpc->ioend->io_size = end_pos - wpc->ioend->io_offset;
+
wbc_account_cgroup_owner(wbc, folio, len);
return 0;
}
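/*
 * Worked example of the clamp above (numbers purely illustrative):
 * with a block size of 4096 and a 1000-byte write at offset 0,
 * i_size (end_pos) is 1000 while the ioend covers the whole block:
 *
 *	io_offset = 0, io_size = 4096, end_pos = 1000
 *	io_offset + io_size (4096) > end_pos (1000)
 *		=> io_size = end_pos - io_offset = 1000
 *
 * Completion then advances the on-disk size to at most 1000, so a
 * crash cannot expose the zeroed tail of the block as file data.
 */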
static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct folio *folio,
- struct inode *inode, u64 pos, unsigned dirty_len,
- unsigned *count)
+ struct inode *inode, u64 pos, u64 end_pos,
+ unsigned dirty_len, unsigned *count)
{
int error;
@@ -1826,7 +1872,7 @@ static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
break;
default:
error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos,
- map_len);
+ end_pos, map_len);
if (!error)
(*count)++;
break;
@@ -1897,11 +1943,11 @@ static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
* remaining memory is zeroed when mapped, and writes to that
* region are not written out to the file.
*
- * Also adjust the writeback range to skip all blocks entirely
- * beyond i_size.
+ * Also adjust the end_pos to the end of file and skip writeback
+ * for all blocks entirely beyond i_size.
*/
folio_zero_segment(folio, poff, folio_size(folio));
- *end_pos = round_up(isize, i_blocksize(inode));
+ *end_pos = isize;
}
return true;
@@ -1914,6 +1960,7 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
struct inode *inode = folio->mapping->host;
u64 pos = folio_pos(folio);
u64 end_pos = pos + folio_size(folio);
+ u64 end_aligned = 0;
unsigned count = 0;
int error = 0;
u32 rlen;
@@ -1955,9 +2002,10 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
/*
* Walk through the folio to find dirty areas to write back.
*/
- while ((rlen = iomap_find_dirty_range(folio, &pos, end_pos))) {
+ end_aligned = round_up(end_pos, i_blocksize(inode));
+ while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
error = iomap_writepage_map_blocks(wpc, wbc, folio, inode,
- pos, rlen, &count);
+ pos, end_pos, rlen, &count);
if (error)
break;
pos += rlen;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 9153ff3a08e7..e8e80761ac73 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -772,9 +772,9 @@ start_journal_io:
/*
* If the journal is not located on the file system device,
* then we must flush the file system device before we issue
- * the commit record
+ * the commit record and update the journal tail sequence.
*/
- if (commit_transaction->t_need_data_flush &&
+ if ((commit_transaction->t_need_data_flush || update_tail) &&
(journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER))
blkdev_issue_flush(journal->j_fs_dev);
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 4556e4689024..ce63d5fde9c3 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -654,7 +654,7 @@ static void flush_descriptor(journal_t *journal,
set_buffer_jwrite(descriptor);
BUFFER_TRACE(descriptor, "write");
set_buffer_dirty(descriptor);
- write_dirty_buffer(descriptor, REQ_SYNC);
+ write_dirty_buffer(descriptor, JBD2_JOURNAL_REQ_FLAGS);
}
#endif
diff --git a/fs/mount.h b/fs/mount.h
index 185fc56afc13..179f690a0c72 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -38,6 +38,7 @@ struct mount {
struct dentry *mnt_mountpoint;
struct vfsmount mnt;
union {
+ struct rb_node mnt_node; /* node in the ns->mounts rbtree */
struct rcu_head mnt_rcu;
struct llist_node mnt_llist;
};
@@ -51,10 +52,7 @@ struct mount {
struct list_head mnt_child; /* and going through their mnt_child */
struct list_head mnt_instance; /* mount instance on sb->s_mounts */
const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
- union {
- struct rb_node mnt_node; /* Under ns->mounts */
- struct list_head mnt_list;
- };
+ struct list_head mnt_list;
struct list_head mnt_expire; /* link in fs-specific expiry list */
struct list_head mnt_share; /* circular list of shared mounts */
struct list_head mnt_slave_list;/* list of slave mounts */
@@ -145,11 +143,16 @@ static inline bool is_anon_ns(struct mnt_namespace *ns)
return ns->seq == 0;
}
+static inline bool mnt_ns_attached(const struct mount *mnt)
+{
+ return !RB_EMPTY_NODE(&mnt->mnt_node);
+}
+
static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list)
{
- WARN_ON(!(mnt->mnt.mnt_flags & MNT_ONRB));
- mnt->mnt.mnt_flags &= ~MNT_ONRB;
+ WARN_ON(!mnt_ns_attached(mnt));
rb_erase(&mnt->mnt_node, &mnt->mnt_ns->mounts);
+ RB_CLEAR_NODE(&mnt->mnt_node);
list_add_tail(&mnt->mnt_list, dt_list);
}
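mnt_ns_attached() works because the rbtree node itself can record membership: RB_CLEAR_NODE() marks it detached and RB_EMPTY_NODE() tests that state. A small sketch of the idiom (the struct and helpers are illustrative; only the rbtree macros are real kernel API):

	#include <linux/rbtree.h>
	#include <linux/types.h>

	struct tracked {
		struct rb_node node;
	};

	static void tracked_init(struct tracked *t)
	{
		RB_CLEAR_NODE(&t->node);		/* "not linked into any tree" */
	}

	static bool tracked_attached(struct tracked *t)
	{
		return !RB_EMPTY_NODE(&t->node);	/* true once rb_link_node() ran */
	}

	static void tracked_detach(struct tracked *t, struct rb_root *root)
	{
		rb_erase(&t->node, root);
		RB_CLEAR_NODE(&t->node);		/* rb_erase() alone does not clear it */
	}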
diff --git a/fs/namespace.c b/fs/namespace.c
index 23e81c2a1e3f..eac057e56948 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -344,6 +344,7 @@ static struct mount *alloc_vfsmnt(const char *name)
INIT_HLIST_NODE(&mnt->mnt_mp_list);
INIT_LIST_HEAD(&mnt->mnt_umounting);
INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
+ RB_CLEAR_NODE(&mnt->mnt_node);
mnt->mnt.mnt_idmap = &nop_mnt_idmap;
}
return mnt;
@@ -1124,7 +1125,7 @@ static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
struct rb_node **link = &ns->mounts.rb_node;
struct rb_node *parent = NULL;
- WARN_ON(mnt->mnt.mnt_flags & MNT_ONRB);
+ WARN_ON(mnt_ns_attached(mnt));
mnt->mnt_ns = ns;
while (*link) {
parent = *link;
@@ -1135,7 +1136,6 @@ static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
}
rb_link_node(&mnt->mnt_node, parent, link);
rb_insert_color(&mnt->mnt_node, &ns->mounts);
- mnt->mnt.mnt_flags |= MNT_ONRB;
}
/*
@@ -1305,7 +1305,7 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
}
mnt->mnt.mnt_flags = old->mnt.mnt_flags;
- mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL|MNT_ONRB);
+ mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
atomic_inc(&sb->s_active);
mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));
@@ -1763,7 +1763,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
/* Gather the mounts to umount */
for (p = mnt; p; p = next_mnt(p, mnt)) {
p->mnt.mnt_flags |= MNT_UMOUNT;
- if (p->mnt.mnt_flags & MNT_ONRB)
+ if (mnt_ns_attached(p))
move_from_ns(p, &tmp_list);
else
list_move(&p->mnt_list, &tmp_list);
@@ -1912,16 +1912,14 @@ static int do_umount(struct mount *mnt, int flags)
event++;
if (flags & MNT_DETACH) {
- if (mnt->mnt.mnt_flags & MNT_ONRB ||
- !list_empty(&mnt->mnt_list))
+ if (mnt_ns_attached(mnt) || !list_empty(&mnt->mnt_list))
umount_tree(mnt, UMOUNT_PROPAGATE);
retval = 0;
} else {
shrink_submounts(mnt);
retval = -EBUSY;
if (!propagate_mount_busy(mnt, 2)) {
- if (mnt->mnt.mnt_flags & MNT_ONRB ||
- !list_empty(&mnt->mnt_list))
+ if (mnt_ns_attached(mnt) || !list_empty(&mnt->mnt_list))
umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
retval = 0;
}
@@ -2055,9 +2053,15 @@ SYSCALL_DEFINE1(oldumount, char __user *, name)
static bool is_mnt_ns_file(struct dentry *dentry)
{
+ struct ns_common *ns;
+
/* Is this a proxy for a mount namespace? */
- return dentry->d_op == &ns_dentry_operations &&
- dentry->d_fsdata == &mntns_operations;
+ if (dentry->d_op != &ns_dentry_operations)
+ return false;
+
+ ns = d_inode(dentry)->i_private;
+
+ return ns->ops == &mntns_operations;
}
struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 7ac34550c403..4dc9b8286355 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -275,22 +275,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
netfs_stat(&netfs_n_rh_download);
if (rreq->netfs_ops->prepare_read) {
ret = rreq->netfs_ops->prepare_read(subreq);
- if (ret < 0) {
- atomic_dec(&rreq->nr_outstanding);
- netfs_put_subrequest(subreq, false,
- netfs_sreq_trace_put_cancel);
- break;
- }
+ if (ret < 0)
+ goto prep_failed;
trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
}
slice = netfs_prepare_read_iterator(subreq);
- if (slice < 0) {
- atomic_dec(&rreq->nr_outstanding);
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
- ret = slice;
- break;
- }
+ if (slice < 0)
+ goto prep_iter_failed;
rreq->netfs_ops->issue_read(subreq);
goto done;
@@ -302,6 +294,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
netfs_stat(&netfs_n_rh_zero);
slice = netfs_prepare_read_iterator(subreq);
+ if (slice < 0)
+ goto prep_iter_failed;
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
netfs_read_subreq_terminated(subreq, 0, false);
goto done;
@@ -310,6 +304,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
if (source == NETFS_READ_FROM_CACHE) {
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
slice = netfs_prepare_read_iterator(subreq);
+ if (slice < 0)
+ goto prep_iter_failed;
netfs_read_cache_to_pagecache(rreq, subreq);
goto done;
}
@@ -318,6 +314,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
WARN_ON_ONCE(1);
break;
+ prep_iter_failed:
+ ret = slice;
+ prep_failed:
+ subreq->error = ret;
+ atomic_dec(&rreq->nr_outstanding);
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+ break;
+
done:
size -= slice;
start += slice;
diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
index 88f2adfab75e..f9421f3e6d37 100644
--- a/fs/netfs/direct_write.c
+++ b/fs/netfs/direct_write.c
@@ -67,7 +67,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
* allocate a sufficiently large bvec array and may shorten the
* request.
*/
- if (async || user_backed_iter(iter)) {
+ if (user_backed_iter(iter)) {
n = netfs_extract_user_iter(iter, len, &wreq->iter, 0);
if (n < 0) {
ret = n;
@@ -77,6 +77,11 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
wreq->direct_bv_count = n;
wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
} else {
+ /* If this is a kernel-generated async DIO request,
+ * assume that any resources the iterator points to
+ * (eg. a bio_vec array) will persist till the end of
+ * the op.
+ */
wreq->iter = *iter;
}
@@ -104,7 +109,6 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
TASK_UNINTERRUPTIBLE);
- smp_rmb(); /* Read error/transferred after RIP flag */
ret = wreq->error;
if (ret == 0) {
ret = wreq->transferred;
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
index 3cbb289535a8..e8624f5c7fcc 100644
--- a/fs/netfs/read_collect.c
+++ b/fs/netfs/read_collect.c
@@ -62,10 +62,14 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
} else {
trace_netfs_folio(folio, netfs_folio_trace_read_done);
}
+
+ folioq_clear(folioq, slot);
} else {
// TODO: Use of PG_private_2 is deprecated.
if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
netfs_pgpriv2_mark_copy_to_cache(subreq, rreq, folioq, slot);
+ else
+ folioq_clear(folioq, slot);
}
if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
@@ -77,8 +81,6 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
folio_unlock(folio);
}
}
-
- folioq_clear(folioq, slot);
}
/*
@@ -247,16 +249,17 @@ donation_changed:
/* Deal with the trickiest case: that this subreq is in the middle of a
* folio, not touching either edge, but finishes first. In such a
- * case, we donate to the previous subreq, if there is one, so that the
- * donation is only handled when that completes - and remove this
- * subreq from the list.
+ * case, we donate to the previous subreq, if there is one and if it is
+ * contiguous, so that the donation is only handled when that completes
+ * - and remove this subreq from the list.
*
* If the previous subreq finished first, we will have acquired their
* donation and should be able to unlock folios and/or donate nextwards.
*/
if (!subreq->consumed &&
!prev_donated &&
- !list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
+ !list_is_first(&subreq->rreq_link, &rreq->subrequests) &&
+ subreq->start == prev->start + prev->len) {
prev = list_prev_entry(subreq, rreq_link);
WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len);
subreq->start += subreq->len;
@@ -378,8 +381,7 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq)
task_io_account_read(rreq->transferred);
trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
- clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
- wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
+ clear_and_wake_up_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
trace_netfs_rreq(rreq, netfs_rreq_trace_done);
netfs_clear_subrequests(rreq, false);
@@ -438,7 +440,7 @@ void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq,
rreq->origin == NETFS_READPAGE ||
rreq->origin == NETFS_READ_FOR_WRITE)) {
netfs_consume_read_data(subreq, was_async);
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
}
}
EXPORT_SYMBOL(netfs_read_subreq_progress);
@@ -497,7 +499,7 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
rreq->origin == NETFS_READPAGE ||
rreq->origin == NETFS_READ_FOR_WRITE)) {
netfs_consume_read_data(subreq, was_async);
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
}
rreq->transferred += subreq->transferred;
}
@@ -511,10 +513,13 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
} else {
trace_netfs_sreq(subreq, netfs_sreq_trace_short);
if (subreq->transferred > subreq->consumed) {
- __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
- set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
- } else if (!__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
+ /* If we didn't read new data, abandon retry. */
+ if (subreq->retry_count &&
+ test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) {
+ __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
+ }
+ } else if (test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) {
__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
} else {
diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c
index ba5af89d37fa..54d5004fec18 100644
--- a/fs/netfs/read_pgpriv2.c
+++ b/fs/netfs/read_pgpriv2.c
@@ -170,6 +170,10 @@ void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
netfs_stat(&netfs_n_wh_copy_to_cache);
+ if (!wreq->io_streams[1].avail) {
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+ goto couldnt_start;
+ }
for (;;) {
error = netfs_pgpriv2_copy_folio(wreq, folio);
diff --git a/fs/netfs/read_retry.c b/fs/netfs/read_retry.c
index 0350592ea804..16b676c68dcd 100644
--- a/fs/netfs/read_retry.c
+++ b/fs/netfs/read_retry.c
@@ -49,13 +49,15 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
* up to the first permanently failed one.
*/
if (!rreq->netfs_ops->prepare_read &&
- !test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags)) {
+ !rreq->cache_resources.ops) {
struct netfs_io_subrequest *subreq;
list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
break;
if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ subreq->retry_count++;
netfs_reset_iter(subreq);
netfs_reissue_read(rreq, subreq);
}
@@ -137,7 +139,8 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
stream0->sreq_max_len = subreq->len;
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ subreq->retry_count++;
spin_lock_bh(&rreq->lock);
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
@@ -149,7 +152,8 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
BUG_ON(!len);
/* Renegotiate max_len (rsize) */
- if (rreq->netfs_ops->prepare_read(subreq) < 0) {
+ if (rreq->netfs_ops->prepare_read &&
+ rreq->netfs_ops->prepare_read(subreq) < 0) {
trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
}
@@ -213,7 +217,6 @@ abandon:
subreq->error = -ENOMEM;
__clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- __clear_bit(NETFS_SREQ_RETRYING, &subreq->flags);
}
spin_lock_bh(&rreq->lock);
list_splice_tail_init(&queue, &rreq->subrequests);
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 1d438be2e1b4..ca3a11ed9b54 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -179,7 +179,6 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
struct iov_iter source = subreq->io_iter;
iov_iter_revert(&source, subreq->len - source.count);
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
netfs_reissue_write(stream, subreq, &source);
}
@@ -234,7 +233,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
/* Renegotiate max_len (wsize) */
trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+ subreq->retry_count++;
stream->prepare_write(subreq);
part = min(len, stream->sreq_max_len);
@@ -279,7 +278,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
subreq->start = start;
subreq->debug_index = atomic_inc_return(&wreq->subreq_counter);
subreq->stream_nr = to->stream_nr;
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+ subreq->retry_count = 1;
trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
refcount_read(&subreq->ref),
@@ -501,8 +500,7 @@ reassess_streams:
goto need_retry;
if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
trace_netfs_rreq(wreq, netfs_rreq_trace_unpause);
- clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags);
- wake_up_bit(&wreq->flags, NETFS_RREQ_PAUSE);
+ clear_and_wake_up_bit(NETFS_RREQ_PAUSE, &wreq->flags);
}
if (notes & NEED_REASSESS) {
@@ -605,8 +603,7 @@ void netfs_write_collection_worker(struct work_struct *work)
_debug("finished");
trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
- clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
- wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
+ clear_and_wake_up_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
if (wreq->iocb) {
size_t written = min(wreq->transferred, wreq->len);
@@ -714,8 +711,7 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
- clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
- wake_up_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS);
+ clear_and_wake_up_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
/* If we are at the head of the queue, wake up the collector,
* transferring a ref to it if we were the ones to do so.
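These conversions drop the open-coded unlock/wake pair in favour of clear_and_wake_up_bit(). As a rough reference only (check include/linux/wait_bit.h in the target tree), the helper is expected to bundle the same sequence plus the barrier the waker needs:

static inline void clear_and_wake_up_bit(int bit, void *word)
{
	clear_bit_unlock(bit, word);
	smp_mb__after_atomic();	/* order the clear before the waiter check */
	wake_up_bit(word, bit);
}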
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index bf6d507578e5..ff0e82505a0b 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -244,6 +244,8 @@ void netfs_reissue_write(struct netfs_io_stream *stream,
iov_iter_advance(source, size);
iov_iter_truncate(&subreq->io_iter, size);
+ subreq->retry_count++;
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
netfs_do_issue_write(stream, subreq);
}
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 810269ee0a50..d49e4ce27999 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -263,6 +263,12 @@ int nfs_netfs_readahead(struct readahead_control *ractl)
static atomic_t nfs_netfs_debug_id;
static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
{
+ if (!file) {
+ if (WARN_ON_ONCE(rreq->origin != NETFS_PGPRIV2_COPY_TO_CACHE))
+ return -EIO;
+ return 0;
+ }
+
rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
@@ -274,7 +280,8 @@ static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *fi
static void nfs_netfs_free_request(struct netfs_io_request *rreq)
{
- put_nfs_open_context(rreq->netfs_priv);
+ if (rreq->netfs_priv)
+ put_nfs_open_context(rreq->netfs_priv);
}
static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index dec553034027..e933f9c65d90 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -47,10 +47,8 @@ static void show_mark_fhandle(struct seq_file *m, struct inode *inode)
size = f->handle_bytes >> 2;
ret = exportfs_encode_fid(inode, (struct fid *)f->f_handle, &size);
- if ((ret == FILEID_INVALID) || (ret < 0)) {
- WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
+ if ((ret == FILEID_INVALID) || (ret < 0))
return;
- }
f->handle_type = ret;
f->handle_bytes = size * sizeof(u32);
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 3601ddfeddc2..0c28e5fa3407 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -415,13 +415,13 @@ int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upperdentry,
return err;
}
-struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
+struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct inode *realinode,
bool is_upper)
{
struct ovl_fh *fh;
int fh_type, dwords;
int buflen = MAX_HANDLE_SZ;
- uuid_t *uuid = &real->d_sb->s_uuid;
+ uuid_t *uuid = &realinode->i_sb->s_uuid;
int err;
/* Make sure the real fid stays 32bit aligned */
@@ -438,13 +438,13 @@ struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
* the price of reconnecting the dentry.
*/
dwords = buflen >> 2;
- fh_type = exportfs_encode_fh(real, (void *)fh->fb.fid, &dwords, 0);
+ fh_type = exportfs_encode_inode_fh(realinode, (void *)fh->fb.fid,
+ &dwords, NULL, 0);
buflen = (dwords << 2);
err = -EIO;
- if (WARN_ON(fh_type < 0) ||
- WARN_ON(buflen > MAX_HANDLE_SZ) ||
- WARN_ON(fh_type == FILEID_INVALID))
+ if (fh_type < 0 || fh_type == FILEID_INVALID ||
+ WARN_ON(buflen > MAX_HANDLE_SZ))
goto out_err;
fh->fb.version = OVL_FH_VERSION;
@@ -480,7 +480,7 @@ struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin)
if (!ovl_can_decode_fh(origin->d_sb))
return NULL;
- return ovl_encode_real_fh(ofs, origin, false);
+ return ovl_encode_real_fh(ofs, d_inode(origin), false);
}
int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
@@ -505,7 +505,7 @@ static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper,
const struct ovl_fh *fh;
int err;
- fh = ovl_encode_real_fh(ofs, upper, true);
+ fh = ovl_encode_real_fh(ofs, d_inode(upper), true);
if (IS_ERR(fh))
return PTR_ERR(fh);
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 5868cb222955..444aeeccb6da 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -176,35 +176,37 @@ static int ovl_connect_layer(struct dentry *dentry)
*
* Return 0 for upper file handle, > 0 for lower file handle or < 0 on error.
*/
-static int ovl_check_encode_origin(struct dentry *dentry)
+static int ovl_check_encode_origin(struct inode *inode)
{
- struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+ struct ovl_fs *ofs = OVL_FS(inode->i_sb);
bool decodable = ofs->config.nfs_export;
+ struct dentry *dentry;
+ int err;
/* No upper layer? */
if (!ovl_upper_mnt(ofs))
return 1;
/* Lower file handle for non-upper non-decodable */
- if (!ovl_dentry_upper(dentry) && !decodable)
+ if (!ovl_inode_upper(inode) && !decodable)
return 1;
/* Upper file handle for pure upper */
- if (!ovl_dentry_lower(dentry))
+ if (!ovl_inode_lower(inode))
return 0;
/*
* Root is never indexed, so if there's an upper layer, encode upper for
* root.
*/
- if (dentry == dentry->d_sb->s_root)
+ if (inode == d_inode(inode->i_sb->s_root))
return 0;
/*
* Upper decodable file handle for non-indexed upper.
*/
- if (ovl_dentry_upper(dentry) && decodable &&
- !ovl_test_flag(OVL_INDEX, d_inode(dentry)))
+ if (ovl_inode_upper(inode) && decodable &&
+ !ovl_test_flag(OVL_INDEX, inode))
return 0;
/*
@@ -213,14 +215,23 @@ static int ovl_check_encode_origin(struct dentry *dentry)
* ovl_connect_layer() will try to make origin's layer "connected" by
* copying up a "connectable" ancestor.
*/
- if (d_is_dir(dentry) && decodable)
- return ovl_connect_layer(dentry);
+ if (!decodable || !S_ISDIR(inode->i_mode))
+ return 1;
+
+ dentry = d_find_any_alias(inode);
+ if (!dentry)
+ return -ENOENT;
+
+ err = ovl_connect_layer(dentry);
+ dput(dentry);
+ if (err < 0)
+ return err;
/* Lower file handle for indexed and non-upper dir/non-dir */
return 1;
}
-static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct dentry *dentry,
+static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct inode *inode,
u32 *fid, int buflen)
{
struct ovl_fh *fh = NULL;
@@ -231,13 +242,13 @@ static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct dentry *dentry,
* Check if we should encode a lower or upper file handle and maybe
* copy up an ancestor to make lower file handle connectable.
*/
- err = enc_lower = ovl_check_encode_origin(dentry);
+ err = enc_lower = ovl_check_encode_origin(inode);
if (enc_lower < 0)
goto fail;
/* Encode an upper or lower file handle */
- fh = ovl_encode_real_fh(ofs, enc_lower ? ovl_dentry_lower(dentry) :
- ovl_dentry_upper(dentry), !enc_lower);
+ fh = ovl_encode_real_fh(ofs, enc_lower ? ovl_inode_lower(inode) :
+ ovl_inode_upper(inode), !enc_lower);
if (IS_ERR(fh))
return PTR_ERR(fh);
@@ -251,8 +262,8 @@ out:
return err;
fail:
- pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i)\n",
- dentry, err);
+ pr_warn_ratelimited("failed to encode file handle (ino=%lu, err=%i)\n",
+ inode->i_ino, err);
goto out;
}
@@ -260,19 +271,13 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
struct inode *parent)
{
struct ovl_fs *ofs = OVL_FS(inode->i_sb);
- struct dentry *dentry;
int bytes, buflen = *max_len << 2;
/* TODO: encode connectable file handles */
if (parent)
return FILEID_INVALID;
- dentry = d_find_any_alias(inode);
- if (!dentry)
- return FILEID_INVALID;
-
- bytes = ovl_dentry_to_fid(ofs, dentry, fid, buflen);
- dput(dentry);
+ bytes = ovl_dentry_to_fid(ofs, inode, fid, buflen);
if (bytes <= 0)
return FILEID_INVALID;
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 7e27b7d4adee..cea820cb3b55 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -542,7 +542,7 @@ int ovl_verify_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry,
struct ovl_fh *fh;
int err;
- fh = ovl_encode_real_fh(ofs, real, is_upper);
+ fh = ovl_encode_real_fh(ofs, d_inode(real), is_upper);
err = PTR_ERR(fh);
if (IS_ERR(fh)) {
fh = NULL;
@@ -738,7 +738,7 @@ int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin,
struct ovl_fh *fh;
int err;
- fh = ovl_encode_real_fh(ofs, origin, false);
+ fh = ovl_encode_real_fh(ofs, d_inode(origin), false);
if (IS_ERR(fh))
return PTR_ERR(fh);
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index b361f35762be..0021e2025020 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -865,7 +865,7 @@ int ovl_copy_up_with_data(struct dentry *dentry);
int ovl_maybe_copy_up(struct dentry *dentry, int flags);
int ovl_copy_xattr(struct super_block *sb, const struct path *path, struct dentry *new);
int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upper, struct kstat *stat);
-struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
+struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct inode *realinode,
bool is_upper);
struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin);
int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 85925ec0051a..3310d1ad4d0e 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -179,8 +179,7 @@ static int qnx6_statfs(struct dentry *dentry, struct kstatfs *buf)
*/
static const char *qnx6_checkroot(struct super_block *s)
{
- static char match_root[2][3] = {".\0\0", "..\0"};
- int i, error = 0;
+ int error = 0;
struct qnx6_dir_entry *dir_entry;
struct inode *root = d_inode(s->s_root);
struct address_space *mapping = root->i_mapping;
@@ -189,11 +188,9 @@ static const char *qnx6_checkroot(struct super_block *s)
if (IS_ERR(folio))
return "error reading root directory";
dir_entry = kmap_local_folio(folio, 0);
- for (i = 0; i < 2; i++) {
- /* maximum 3 bytes - due to match_root limitation */
- if (strncmp(dir_entry[i].de_fname, match_root[i], 3))
- error = 1;
- }
+ if (memcmp(dir_entry[0].de_fname, ".", 2) ||
+ memcmp(dir_entry[1].de_fname, "..", 3))
+ error = 1;
folio_release_kmap(folio, dir_entry);
if (error)
return "error reading root directory.";
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index bd42a419458e..ab0b949924d7 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -152,8 +152,17 @@ again:
spin_unlock(&ses->ses_lock);
rc = cifs_negotiate_protocol(0, ses, server);
- if (!rc)
+ if (!rc) {
rc = cifs_setup_session(0, ses, server, ses->local_nls);
+ if ((rc == -EACCES) || (rc == -EHOSTDOWN) || (rc == -EKEYREVOKED)) {
+ /*
+ * Try alternate password for next reconnect if an alternate
+ * password is available.
+ */
+ if (ses->password2)
+ swap(ses->password2, ses->password);
+ }
+ }
/* do we need to reconnect tcon? */
if (rc || !tcon->need_reconnect) {
@@ -1319,14 +1328,16 @@ cifs_readv_callback(struct mid_q_entry *mid)
}
if (rdata->result == -ENODATA) {
- __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
rdata->result = 0;
+ __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
} else {
size_t trans = rdata->subreq.transferred + rdata->got_bytes;
if (trans < rdata->subreq.len &&
rdata->subreq.start + trans == ictx->remote_i_size) {
- __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
rdata->result = 0;
+ __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
+ } else if (rdata->got_bytes > 0) {
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
}
}
@@ -1670,10 +1681,13 @@ cifs_writev_callback(struct mid_q_entry *mid)
if (written > wdata->subreq.len)
written &= 0xFFFF;
- if (written < wdata->subreq.len)
+ if (written < wdata->subreq.len) {
result = -ENOSPC;
- else
+ } else {
result = written;
+ if (written > 0)
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &wdata->subreq.flags);
+ }
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index ddcc9e514a0e..eaa6be4456d0 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -1044,6 +1044,7 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
/* Release netns reference for this server. */
put_net(cifs_net_ns(server));
kfree(server->leaf_fullpath);
+ kfree(server->hostname);
kfree(server);
length = atomic_dec_return(&tcpSesAllocCount);
@@ -1670,8 +1671,6 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
kfree_sensitive(server->session_key.response);
server->session_key.response = NULL;
server->session_key.len = 0;
- kfree(server->hostname);
- server->hostname = NULL;
task = xchg(&server->tsk, NULL);
if (task)
diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c
index 0f788031b740..e3f9213131c4 100644
--- a/fs/smb/client/namespace.c
+++ b/fs/smb/client/namespace.c
@@ -196,11 +196,28 @@ static struct vfsmount *cifs_do_automount(struct path *path)
struct smb3_fs_context tmp;
char *full_path;
struct vfsmount *mnt;
+ struct cifs_sb_info *mntpt_sb;
+ struct cifs_ses *ses;
if (IS_ROOT(mntpt))
return ERR_PTR(-ESTALE);
- cur_ctx = CIFS_SB(mntpt->d_sb)->ctx;
+ mntpt_sb = CIFS_SB(mntpt->d_sb);
+ ses = cifs_sb_master_tcon(mntpt_sb)->ses;
+ cur_ctx = mntpt_sb->ctx;
+
+ /*
+ * At this point, the root session should be in the mntpt sb. We should
+ * bring the sb context passwords in sync with the root session's
+ * passwords. This would help prevent unnecessary retries and password
+ * swaps for automounts.
+ */
+ mutex_lock(&ses->session_mutex);
+ rc = smb3_sync_session_ctx_passwords(mntpt_sb, ses);
+ mutex_unlock(&ses->session_mutex);
+
+ if (rc)
+ return ERR_PTR(rc);
fc = fs_context_for_submount(path->mnt->mnt_sb->s_type, mntpt);
if (IS_ERR(fc))
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 959359301250..0577556f0a41 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -4615,6 +4615,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
rdata->result = 0;
}
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
}
trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
server->credits, server->in_flight,
@@ -4842,10 +4843,12 @@ smb2_writev_callback(struct mid_q_entry *mid)
cifs_stats_bytes_written(tcon, written);
- if (written < wdata->subreq.len)
+ if (written < wdata->subreq.len) {
wdata->result = -ENOSPC;
- else
+ } else if (written > 0) {
wdata->subreq.len = written;
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &wdata->subreq.flags);
+ }
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
@@ -5014,7 +5017,7 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
}
#endif
- if (test_bit(NETFS_SREQ_RETRYING, &wdata->subreq.flags))
+ if (wdata->subreq.retry_count > 0)
smb2_set_replay(server, &rqst);
cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n",
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 23e21845f928..772deec5b90f 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -695,6 +695,9 @@ void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
struct smb2_hdr *rsp_hdr;
struct ksmbd_work *in_work = ksmbd_alloc_work_struct();
+ if (!in_work)
+ return;
+
if (allocate_interim_rsp_buf(in_work)) {
pr_err("smb_allocate_rsp_buf failed!\n");
ksmbd_free_work_struct(in_work);
@@ -3991,6 +3994,26 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
posix_info->DeviceId = cpu_to_le32(ksmbd_kstat->kstat->rdev);
posix_info->HardLinks = cpu_to_le32(ksmbd_kstat->kstat->nlink);
posix_info->Mode = cpu_to_le32(ksmbd_kstat->kstat->mode & 0777);
+ switch (ksmbd_kstat->kstat->mode & S_IFMT) {
+ case S_IFDIR:
+ posix_info->Mode |= cpu_to_le32(POSIX_TYPE_DIR << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFLNK:
+ posix_info->Mode |= cpu_to_le32(POSIX_TYPE_SYMLINK << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFCHR:
+ posix_info->Mode |= cpu_to_le32(POSIX_TYPE_CHARDEV << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFBLK:
+ posix_info->Mode |= cpu_to_le32(POSIX_TYPE_BLKDEV << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFIFO:
+ posix_info->Mode |= cpu_to_le32(POSIX_TYPE_FIFO << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFSOCK:
+ posix_info->Mode |= cpu_to_le32(POSIX_TYPE_SOCKET << POSIX_FILETYPE_SHIFT);
+ }
+
posix_info->Inode = cpu_to_le64(ksmbd_kstat->kstat->ino);
posix_info->DosAttributes =
S_ISDIR(ksmbd_kstat->kstat->mode) ?
@@ -5181,6 +5204,26 @@ static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
file_info->HardLinks = cpu_to_le32(stat.nlink);
file_info->Mode = cpu_to_le32(stat.mode & 0777);
+ switch (stat.mode & S_IFMT) {
+ case S_IFDIR:
+ file_info->Mode |= cpu_to_le32(POSIX_TYPE_DIR << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFLNK:
+ file_info->Mode |= cpu_to_le32(POSIX_TYPE_SYMLINK << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFCHR:
+ file_info->Mode |= cpu_to_le32(POSIX_TYPE_CHARDEV << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFBLK:
+ file_info->Mode |= cpu_to_le32(POSIX_TYPE_BLKDEV << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFIFO:
+ file_info->Mode |= cpu_to_le32(POSIX_TYPE_FIFO << POSIX_FILETYPE_SHIFT);
+ break;
+ case S_IFSOCK:
+ file_info->Mode |= cpu_to_le32(POSIX_TYPE_SOCKET << POSIX_FILETYPE_SHIFT);
+ }
+
file_info->DeviceId = cpu_to_le32(stat.rdev);
/*
diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
index 649dacf7e8c4..17a0b18a8406 100644
--- a/fs/smb/server/smb2pdu.h
+++ b/fs/smb/server/smb2pdu.h
@@ -502,4 +502,14 @@ static inline void *smb2_get_msg(void *buf)
return buf + 4;
}
+#define POSIX_TYPE_FILE 0
+#define POSIX_TYPE_DIR 1
+#define POSIX_TYPE_SYMLINK 2
+#define POSIX_TYPE_CHARDEV 3
+#define POSIX_TYPE_BLKDEV 4
+#define POSIX_TYPE_FIFO 5
+#define POSIX_TYPE_SOCKET 6
+
+#define POSIX_FILETYPE_SHIFT 12
+
#endif /* _SMB2PDU_H */
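A condensed sketch of the mapping the two new switch statements implement, using the defines above. The helper name is made up and the result is host-order, whereas the real code stores the value with cpu_to_le32():

static u32 posix_mode_bits(umode_t mode)
{
	u32 type = POSIX_TYPE_FILE;

	switch (mode & S_IFMT) {
	case S_IFDIR:  type = POSIX_TYPE_DIR;     break;
	case S_IFLNK:  type = POSIX_TYPE_SYMLINK; break;
	case S_IFCHR:  type = POSIX_TYPE_CHARDEV; break;
	case S_IFBLK:  type = POSIX_TYPE_BLKDEV;  break;
	case S_IFIFO:  type = POSIX_TYPE_FIFO;    break;
	case S_IFSOCK: type = POSIX_TYPE_SOCKET;  break;
	}
	/* permission bits in the low 9 bits, file type starting at bit 12 */
	return (mode & 0777) | (type << POSIX_FILETYPE_SHIFT);
}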
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index 0ef3c9f0bfeb..c3785a5434f9 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -2283,8 +2283,7 @@ out:
ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
if (ibdev) {
- if (rdma_frwr_is_supported(&ibdev->attrs))
- rdma_capable = true;
+ rdma_capable = rdma_frwr_is_supported(&ibdev->attrs);
ib_device_put(ibdev);
}
}
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 88d167a5f8b7..40f08eac519c 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -1264,6 +1264,8 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
filepath,
flags,
path);
+ if (!is_last)
+ next[0] = '/';
if (err)
goto out2;
else if (is_last)
@@ -1271,7 +1273,6 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
path_put(parent_path);
*parent_path = *path;
- next[0] = '/';
remain_len -= filename_len + 1;
}
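The hunk above restores the path separator before the error check rather than after it, so a failed component lookup no longer leaves the caller with a NUL-truncated path. A minimal sketch of the general pattern, with a hypothetical lookup helper:

static int example_walk(char *path)
{
	char *p = path, *sep;
	int err;

	while ((sep = strchr(p, '/'))) {
		*sep = '\0';
		err = example_lookup_component(p);	/* hypothetical */
		*sep = '/';		/* restore before acting on err */
		if (err)
			return err;
		p = sep + 1;
	}
	return example_lookup_component(p);		/* last component */
}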
diff --git a/fs/xfs/libxfs/xfs_rtgroup.h b/fs/xfs/libxfs/xfs_rtgroup.h
index 7e7e491ff06f..2d7822644eff 100644
--- a/fs/xfs/libxfs/xfs_rtgroup.h
+++ b/fs/xfs/libxfs/xfs_rtgroup.h
@@ -272,7 +272,7 @@ static inline int xfs_initialize_rtgroups(struct xfs_mount *mp,
}
# define xfs_rtgroup_extents(mp, rgno) (0)
-# define xfs_update_last_rtgroup_size(mp, rgno) (-EOPNOTSUPP)
+# define xfs_update_last_rtgroup_size(mp, rgno) (0)
# define xfs_rtgroup_lock(rtg, gf) ((void)0)
# define xfs_rtgroup_unlock(rtg, gf) ((void)0)
# define xfs_rtgroup_trans_join(tp, rtg, gf) ((void)0)
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index f11d475898f2..201c26322ede 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -87,8 +87,9 @@ xfs_dquot_detach_buf(
}
spin_unlock(&qlip->qli_lock);
if (bp) {
+ xfs_buf_lock(bp);
list_del_init(&qlip->qli_item.li_bio_list);
- xfs_buf_rele(bp);
+ xfs_buf_relse(bp);
}
}
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index e61dd7dd2286..147bd3ee4f7b 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -53,8 +53,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
@@ -127,8 +126,7 @@ static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
-static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
diff --git a/include/linux/bus/stm32_firewall_device.h b/include/linux/bus/stm32_firewall_device.h
index 18e0a2fc3816..5178b72bc920 100644
--- a/include/linux/bus/stm32_firewall_device.h
+++ b/include/linux/bus/stm32_firewall_device.h
@@ -115,7 +115,7 @@ void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 su
#else /* CONFIG_STM32_FIREWALL */
int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall,
- unsigned int nb_firewall);
+ unsigned int nb_firewall)
{
return -ENODEV;
}
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index 0d5448c0b86c..a3ce553413de 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -18,6 +18,11 @@ struct io_uring_cmd {
u8 pdu[32]; /* available inline for free use */
};
+struct io_uring_cmd_data {
+ struct io_uring_sqe sqes[2];
+ void *op_data;
+};
+
static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
return sqe->cmd;
@@ -113,4 +118,9 @@ static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd
return cmd_to_io_kiocb(cmd)->tctx->task;
}
+static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_uring_cmd *cmd)
+{
+ return cmd_to_io_kiocb(cmd)->async_data;
+}
+
#endif /* _LINUX_IO_URING_CMD_H */
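A hypothetical consumer sketch for the new io_uring_cmd_data layout: a driver's ->uring_cmd() handler can park per-command state in op_data and find it again via io_uring_cmd_get_async_data(); per the uring_cmd.c hunk further down, the core kfree()s op_data at cleanup if it is left non-NULL. Names prefixed example_ are made up.

struct example_op {
	int stage;
};

static int example_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct io_uring_cmd_data *data = io_uring_cmd_get_async_data(cmd);
	struct example_op *op;

	if (!data->op_data) {
		op = kzalloc(sizeof(*op), GFP_KERNEL);
		if (!op)
			return -ENOMEM;
		data->op_data = op;	/* freed by the core on cleanup */
	}
	/* ...kick off the async work... */
	return -EIOCBQUEUED;
}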
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 5675af6b740c..75bf54e76f3b 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -335,7 +335,7 @@ struct iomap_ioend {
u16 io_type;
u16 io_flags; /* IOMAP_F_* */
struct inode *io_inode; /* file being written to */
- size_t io_size; /* size of the extent */
+ size_t io_size; /* size of data within eof */
loff_t io_offset; /* offset in the file */
sector_t io_sector; /* start sector of ioend */
struct bio io_bio; /* MUST BE LAST! */
diff --git a/include/linux/mount.h b/include/linux/mount.h
index c34c18b4e8f3..04213d8ef837 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -50,7 +50,7 @@ struct path;
#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
- MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | MNT_ONRB)
+ MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
#define MNT_INTERNAL 0x4000
@@ -64,7 +64,6 @@ struct path;
#define MNT_SYNC_UMOUNT 0x2000000
#define MNT_MARKED 0x4000000
#define MNT_UMOUNT 0x8000000
-#define MNT_ONRB 0x10000000
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 5eaceef41e6c..ecdd5ced16a8 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -185,6 +185,7 @@ struct netfs_io_subrequest {
short error; /* 0 or error that occurred */
unsigned short debug_index; /* Index in list (for debugging output) */
unsigned int nr_segs; /* Number of segs in io_iter */
+ u8 retry_count; /* The number of retries (0 on initial pass) */
enum netfs_io_source source; /* Where to read from/write to */
unsigned char stream_nr; /* I/O stream this belongs to */
unsigned char curr_folioq_slot; /* Folio currently being read */
@@ -194,14 +195,13 @@ struct netfs_io_subrequest {
#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */
#define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */
-#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */
+#define NETFS_SREQ_MADE_PROGRESS 4 /* Set if we transferred at least some data */
#define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */
#define NETFS_SREQ_BOUNDARY 6 /* Set if ends on hard boundary (eg. ceph object) */
#define NETFS_SREQ_HIT_EOF 7 /* Set if short due to EOF */
#define NETFS_SREQ_IN_PROGRESS 8 /* Unlocked when the subrequest completes */
#define NETFS_SREQ_NEED_RETRY 9 /* Set if the filesystem requests a retry */
-#define NETFS_SREQ_RETRYING 10 /* Set if we're retrying */
-#define NETFS_SREQ_FAILED 11 /* Set if the subreq failed unretryably */
+#define NETFS_SREQ_FAILED 10 /* Set if the subreq failed unretryably */
};
enum netfs_io_origin {
@@ -269,7 +269,6 @@ struct netfs_io_request {
size_t prev_donated; /* Fallback for subreq->prev_donated */
refcount_t ref;
unsigned long flags;
-#define NETFS_RREQ_COPY_TO_CACHE 1 /* Need to write to the cache */
#define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */
#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
#define NETFS_RREQ_FAILED 4 /* The request failed */
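Taken together with the retry_count field above, the NETFS_SREQ_MADE_PROGRESS rename inverts the old NO_PROGRESS bookkeeping. An illustrative-only sketch of the pattern the read and write paths now follow; the example_ helpers are not real functions:

static void example_reissue(struct netfs_io_subrequest *subreq)
{
	/* each reissue starts with a clean progress flag and a bumped count */
	__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
	subreq->retry_count++;
	/* ...resubmit the I/O... */
}

static bool example_may_retry(struct netfs_io_subrequest *subreq)
{
	/* a short transfer is only worth retrying if new data arrived */
	return test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
}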
diff --git a/include/linux/poll.h b/include/linux/poll.h
index d1ea4f3714a8..12bb18e8b978 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -25,14 +25,14 @@
struct poll_table_struct;
-/*
+/*
* structures and helpers for f_op->poll implementations
*/
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
/*
- * Do not touch the structure directly, use the access functions
- * poll_does_not_wait() and poll_requested_events() instead.
+ * Do not touch the structure directly, use the access function
+ * poll_requested_events() instead.
*/
typedef struct poll_table_struct {
poll_queue_proc _qproc;
@@ -41,18 +41,16 @@ typedef struct poll_table_struct {
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
- if (p && p->_qproc && wait_address)
+ if (p && p->_qproc) {
p->_qproc(filp, wait_address, p);
-}
-
-/*
- * Return true if it is guaranteed that poll will not wait. This is the case
- * if the poll() of another file descriptor in the set got an event, so there
- * is no need for waiting.
- */
-static inline bool poll_does_not_wait(const poll_table *p)
-{
- return p == NULL || p->_qproc == NULL;
+ /*
+ * This memory barrier is paired in the wq_has_sleeper().
+ * See the comment above prepare_to_wait(), we need to
+ * ensure that subsequent tests in this thread can't be
+ * reordered with __add_wait_queue() in _qproc() paths.
+ */
+ smp_mb();
+ }
}
/*
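The smp_mb() added to poll_wait() above pairs with the barrier inside wq_has_sleeper() on the waker side. A hedged sketch of that waker side, with a made-up example_dev structure:

struct example_dev {
	wait_queue_head_t wq;
	bool data_ready;
};

static void example_wake(struct example_dev *dev)
{
	WRITE_ONCE(dev->data_ready, true);	 /* 1: publish the event */
	if (wq_has_sleeper(&dev->wq))		 /* 2: smp_mb() inside pairs with poll_wait() */
		wake_up_interruptible(&dev->wq); /* 3: wake any pollers */
}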
diff --git a/include/linux/pruss_driver.h b/include/linux/pruss_driver.h
index c9a31c567e85..2e18fef1a2e1 100644
--- a/include/linux/pruss_driver.h
+++ b/include/linux/pruss_driver.h
@@ -144,32 +144,32 @@ static inline int pruss_release_mem_region(struct pruss *pruss,
static inline int pruss_cfg_get_gpmux(struct pruss *pruss,
enum pruss_pru_id pru_id, u8 *mux)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
static inline int pruss_cfg_set_gpmux(struct pruss *pruss,
enum pruss_pru_id pru_id, u8 mux)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
static inline int pruss_cfg_gpimode(struct pruss *pruss,
enum pruss_pru_id pru_id,
enum pruss_gpi_mode mode)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
static inline int pruss_cfg_miirt_enable(struct pruss *pruss, bool enable)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
static inline int pruss_cfg_xfr_enable(struct pruss *pruss,
enum pru_type pru_type,
- bool enable);
+ bool enable)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
#endif /* CONFIG_TI_PRUSS */
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 8c3c372ad735..bcba3935c6f9 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -168,29 +168,6 @@ int devm_regulator_get_enable_read_voltage(struct device *dev, const char *id);
void regulator_put(struct regulator *regulator);
void devm_regulator_put(struct regulator *regulator);
-#if IS_ENABLED(CONFIG_OF)
-struct regulator *__must_check of_regulator_get_optional(struct device *dev,
- struct device_node *node,
- const char *id);
-struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev,
- struct device_node *node,
- const char *id);
-#else
-static inline struct regulator *__must_check of_regulator_get_optional(struct device *dev,
- struct device_node *node,
- const char *id)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev,
- struct device_node *node,
- const char *id)
-{
- return ERR_PTR(-ENODEV);
-}
-#endif
-
int regulator_register_supply_alias(struct device *dev, const char *id,
struct device *alias_dev,
const char *alias_id);
@@ -223,8 +200,6 @@ int regulator_disable_deferred(struct regulator *regulator, int ms);
int __must_check regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
-int __must_check of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
- struct regulator_bulk_data **consumers);
int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
void devm_regulator_bulk_put(struct regulator_bulk_data *consumers);
@@ -373,20 +348,6 @@ devm_regulator_get_optional(struct device *dev, const char *id)
return ERR_PTR(-ENODEV);
}
-static inline struct regulator *__must_check of_regulator_get_optional(struct device *dev,
- struct device_node *node,
- const char *id)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev,
- struct device_node *node,
- const char *id)
-{
- return ERR_PTR(-ENODEV);
-}
-
static inline void regulator_put(struct regulator *regulator)
{
}
@@ -483,12 +444,6 @@ static inline int devm_regulator_bulk_get(struct device *dev, int num_consumers,
return 0;
}
-static inline int of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
- struct regulator_bulk_data **consumers)
-{
- return 0;
-}
-
static inline int devm_regulator_bulk_get_const(
struct device *dev, int num_consumers,
const struct regulator_bulk_data *in_consumers,
@@ -700,6 +655,38 @@ regulator_is_equal(struct regulator *reg1, struct regulator *reg2)
}
#endif
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_REGULATOR)
+struct regulator *__must_check of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id);
+struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id);
+int __must_check of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
+ struct regulator_bulk_data **consumers);
+#else
+static inline struct regulator *__must_check of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
+ struct regulator_bulk_data **consumers)
+{
+ return 0;
+}
+
+#endif
+
static inline int regulator_set_voltage_triplet(struct regulator *regulator,
int min_uV, int target_uV,
int max_uV)
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 341980599c71..e45531455d3b 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -55,10 +55,10 @@ struct seccomp_data;
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
static inline int secure_computing(void) { return 0; }
-static inline int __secure_computing(const struct seccomp_data *sd) { return 0; }
#else
static inline void secure_computing_strict(int this_syscall) { return; }
#endif
+static inline int __secure_computing(const struct seccomp_data *sd) { return 0; }
static inline long prctl_get_seccomp(void)
{
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index c858270141bc..c39a426ebf52 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -174,12 +174,4 @@ static inline void sk_mark_napi_id_once(struct sock *sk,
#endif
}
-static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
- const struct xdp_buff *xdp)
-{
-#ifdef CONFIG_NET_RX_BUSY_POLL
- __sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
-#endif
-}
-
#endif /* _LINUX_NET_BUSY_POLL_H */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 3c82fad904d4..c7f42844c79a 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -282,7 +282,7 @@ static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
{
- return inet_csk_reqsk_queue_len(sk) >= READ_ONCE(sk->sk_max_ack_backlog);
+ return inet_csk_reqsk_queue_len(sk) > READ_ONCE(sk->sk_max_ack_backlog);
}
bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 793e6fd78bc5..60a5347922be 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -294,7 +294,7 @@ static inline long page_pool_unref_page(struct page *page, long nr)
static inline void page_pool_ref_netmem(netmem_ref netmem)
{
- atomic_long_inc(&netmem_to_page(netmem)->pp_ref_count);
+ atomic_long_inc(netmem_get_pp_ref_count_ref(netmem));
}
static inline void page_pool_ref_page(struct page *page)
diff --git a/include/net/sock.h b/include/net/sock.h
index c383126f691d..691ca7695d1d 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2297,7 +2297,7 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
}
/**
- * sock_poll_wait - place memory barrier behind the poll_wait call.
+ * sock_poll_wait - wrapper for the poll_wait call.
* @filp: file
* @sock: socket to wait on
* @p: poll_table
@@ -2307,15 +2307,12 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
static inline void sock_poll_wait(struct file *filp, struct socket *sock,
poll_table *p)
{
- if (!poll_does_not_wait(p)) {
- poll_wait(filp, &sock->wq.wait, p);
- /* We need to be sure we are in sync with the
- * socket flags modification.
- *
- * This memory barrier is paired in the wq_has_sleeper.
- */
- smp_mb();
- }
+ /* Provides a barrier we need to be sure we are in sync
+ * with the socket flags modification.
+ *
+ * This memory barrier is paired in the wq_has_sleeper.
+ */
+ poll_wait(filp, &sock->wq.wait, p);
}
static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
diff --git a/include/net/xdp.h b/include/net/xdp.h
index e6770dd40c91..b5b10f2b88e5 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -62,7 +62,6 @@ struct xdp_rxq_info {
u32 queue_index;
u32 reg_state;
struct xdp_mem_info mem;
- unsigned int napi_id;
u32 frag_size;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 40085afd9160..7a7316d9c0da 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -59,15 +59,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
xp_fill_cb(pool, desc);
}
-static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
-{
-#ifdef CONFIG_NET_RX_BUSY_POLL
- return pool->heads[0].xdp.rxq->napi_id;
-#else
- return 0;
-#endif
-}
-
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs)
{
@@ -306,11 +297,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
{
}
-static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
-{
- return 0;
-}
-
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs)
{
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index d650ae6b58d3..74e5b9960c54 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -329,7 +329,6 @@ struct ufs_pwr_mode_info {
* @program_key: program or evict an inline encryption key
* @fill_crypto_prdt: initialize crypto-related fields in the PRDT
* @event_notify: called to notify important events
- * @reinit_notify: called to notify reinit of UFSHCD during max gear switch
* @mcq_config_resource: called to configure MCQ platform resources
* @get_hba_mac: reports maximum number of outstanding commands supported by
* the controller. Should be implemented for UFSHCI 4.0 or later
@@ -381,7 +380,6 @@ struct ufs_hba_variant_ops {
void *prdt, unsigned int num_segments);
void (*event_notify)(struct ufs_hba *hba,
enum ufs_event_type evt, void *data);
- void (*reinit_notify)(struct ufs_hba *);
int (*mcq_config_resource)(struct ufs_hba *hba);
int (*get_hba_mac)(struct ufs_hba *hba);
int (*op_runtime_config)(struct ufs_hba *hba);
diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c
index fab936d31ba8..100d5da94cb9 100644
--- a/io_uring/eventfd.c
+++ b/io_uring/eventfd.c
@@ -33,20 +33,18 @@ static void io_eventfd_free(struct rcu_head *rcu)
kfree(ev_fd);
}
-static void io_eventfd_do_signal(struct rcu_head *rcu)
+static void io_eventfd_put(struct io_ev_fd *ev_fd)
{
- struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
-
- eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
-
if (refcount_dec_and_test(&ev_fd->refs))
- io_eventfd_free(rcu);
+ call_rcu(&ev_fd->rcu, io_eventfd_free);
}
-static void io_eventfd_put(struct io_ev_fd *ev_fd)
+static void io_eventfd_do_signal(struct rcu_head *rcu)
{
- if (refcount_dec_and_test(&ev_fd->refs))
- call_rcu(&ev_fd->rcu, io_eventfd_free);
+ struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
+
+ eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
+ io_eventfd_put(ev_fd);
}
static void io_eventfd_release(struct io_ev_fd *ev_fd, bool put_ref)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index d3403c8216db..4758f1ba902b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -320,7 +320,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
sizeof(struct io_async_rw));
ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
- sizeof(struct uring_cache));
+ sizeof(struct io_uring_cmd_data));
spin_lock_init(&ctx->msg_lock);
ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
sizeof(struct io_kiocb));
@@ -1226,10 +1226,7 @@ static void io_req_normal_work_add(struct io_kiocb *req)
/* SQPOLL doesn't need the task_work added, it'll run it itself */
if (ctx->flags & IORING_SETUP_SQPOLL) {
- struct io_sq_data *sqd = ctx->sq_data;
-
- if (sqd->thread)
- __set_notify_signal(sqd->thread);
+ __set_notify_signal(tctx->task);
return;
}
@@ -2813,13 +2810,12 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
if (unlikely(!ctx->poll_activated))
io_activate_pollwq(ctx);
-
- poll_wait(file, &ctx->poll_wq, wait);
/*
- * synchronizes with barrier from wq_has_sleeper call in
- * io_commit_cqring
+ * provides mb() which pairs with barrier from wq_has_sleeper
+ * call in io_commit_cqring
*/
- smp_rmb();
+ poll_wait(file, &ctx->poll_wq, wait);
+
if (!io_sqring_full(ctx))
mask |= EPOLLOUT | EPOLLWRNORM;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 12abee607e4a..492cbbf2c23b 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -125,6 +125,9 @@ static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
#if defined(CONFIG_PROVE_LOCKING)
lockdep_assert(in_task());
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+ lockdep_assert_held(&ctx->uring_lock);
+
if (ctx->flags & IORING_SETUP_IOPOLL) {
lockdep_assert_held(&ctx->uring_lock);
} else if (!ctx->task_complete) {
@@ -136,9 +139,7 @@ static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
* Not from an SQE, as those cannot be submitted, but via
* updating tagged resources.
*/
- if (percpu_ref_is_dying(&ctx->refs))
- lockdep_assert(current_work());
- else
+ if (!percpu_ref_is_dying(&ctx->refs))
lockdep_assert(current == ctx->submitter_task);
}
#endif
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 3de75eca1c92..e8baef4e5146 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -7,6 +7,7 @@
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/io_uring.h>
+#include <linux/io_uring/cmd.h>
#include "io_uring.h"
#include "opdef.h"
@@ -414,7 +415,7 @@ const struct io_issue_def io_issue_defs[] = {
.plug = 1,
.iopoll = 1,
.iopoll_queue = 1,
- .async_size = 2 * sizeof(struct io_uring_sqe),
+ .async_size = sizeof(struct io_uring_cmd_data),
.prep = io_uring_cmd_prep,
.issue = io_uring_cmd,
},
diff --git a/io_uring/register.c b/io_uring/register.c
index fdd44914c39c..371aec87e078 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -405,8 +405,8 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
{
struct io_ring_ctx_rings o = { }, n = { }, *to_free = NULL;
size_t size, sq_array_offset;
+ unsigned i, tail, old_head;
struct io_uring_params p;
- unsigned i, tail;
void *ptr;
int ret;
@@ -449,10 +449,18 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
if (IS_ERR(n.rings))
return PTR_ERR(n.rings);
- n.rings->sq_ring_mask = p.sq_entries - 1;
- n.rings->cq_ring_mask = p.cq_entries - 1;
- n.rings->sq_ring_entries = p.sq_entries;
- n.rings->cq_ring_entries = p.cq_entries;
+ /*
+ * At this point n.rings is shared with userspace, just like o.rings
+ * is as well. While we don't expect userspace to modify it while
+ * a resize is in progress, and it's most likely that userspace will
+ * shoot itself in the foot if it does, we can't always assume good
+ * intent... Use read/write once helpers from here on to indicate the
+ * shared nature of it.
+ */
+ WRITE_ONCE(n.rings->sq_ring_mask, p.sq_entries - 1);
+ WRITE_ONCE(n.rings->cq_ring_mask, p.cq_entries - 1);
+ WRITE_ONCE(n.rings->sq_ring_entries, p.sq_entries);
+ WRITE_ONCE(n.rings->cq_ring_entries, p.cq_entries);
if (copy_to_user(arg, &p, sizeof(p))) {
io_register_free_rings(&p, &n);
@@ -509,20 +517,22 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
* rings can't hold what is already there, then fail the operation.
*/
n.sq_sqes = ptr;
- tail = o.rings->sq.tail;
- if (tail - o.rings->sq.head > p.sq_entries)
+ tail = READ_ONCE(o.rings->sq.tail);
+ old_head = READ_ONCE(o.rings->sq.head);
+ if (tail - old_head > p.sq_entries)
goto overflow;
- for (i = o.rings->sq.head; i < tail; i++) {
+ for (i = old_head; i < tail; i++) {
unsigned src_head = i & (ctx->sq_entries - 1);
- unsigned dst_head = i & n.rings->sq_ring_mask;
+ unsigned dst_head = i & (p.sq_entries - 1);
n.sq_sqes[dst_head] = o.sq_sqes[src_head];
}
- n.rings->sq.head = o.rings->sq.head;
- n.rings->sq.tail = o.rings->sq.tail;
+ WRITE_ONCE(n.rings->sq.head, READ_ONCE(o.rings->sq.head));
+ WRITE_ONCE(n.rings->sq.tail, READ_ONCE(o.rings->sq.tail));
- tail = o.rings->cq.tail;
- if (tail - o.rings->cq.head > p.cq_entries) {
+ tail = READ_ONCE(o.rings->cq.tail);
+ old_head = READ_ONCE(o.rings->cq.head);
+ if (tail - old_head > p.cq_entries) {
overflow:
/* restore old rings, and return -EOVERFLOW via cleanup path */
ctx->rings = o.rings;
@@ -531,21 +541,21 @@ overflow:
ret = -EOVERFLOW;
goto out;
}
- for (i = o.rings->cq.head; i < tail; i++) {
+ for (i = old_head; i < tail; i++) {
unsigned src_head = i & (ctx->cq_entries - 1);
- unsigned dst_head = i & n.rings->cq_ring_mask;
+ unsigned dst_head = i & (p.cq_entries - 1);
n.rings->cqes[dst_head] = o.rings->cqes[src_head];
}
- n.rings->cq.head = o.rings->cq.head;
- n.rings->cq.tail = o.rings->cq.tail;
+ WRITE_ONCE(n.rings->cq.head, READ_ONCE(o.rings->cq.head));
+ WRITE_ONCE(n.rings->cq.tail, READ_ONCE(o.rings->cq.tail));
/* invalidate cached cqe refill */
ctx->cqe_cached = ctx->cqe_sentinel = NULL;
- n.rings->sq_dropped = o.rings->sq_dropped;
- n.rings->sq_flags = o.rings->sq_flags;
- n.rings->cq_flags = o.rings->cq_flags;
- n.rings->cq_overflow = o.rings->cq_overflow;
+ WRITE_ONCE(n.rings->sq_dropped, READ_ONCE(o.rings->sq_dropped));
+ WRITE_ONCE(n.rings->sq_flags, READ_ONCE(o.rings->sq_flags));
+ WRITE_ONCE(n.rings->cq_flags, READ_ONCE(o.rings->cq_flags));
+ WRITE_ONCE(n.rings->cq_overflow, READ_ONCE(o.rings->cq_overflow));
/* all done, store old pointers and assign new ones */
if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
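A rough sketch of the defensive pattern the resize path switches to: ring indices live in memory userspace can modify concurrently, so snapshot them once with READ_ONCE(), mask with locally trusted sizes, and publish results with WRITE_ONCE(). The example_ring type is invented for illustration:

static void example_copy_ring(struct example_ring *dst, const struct example_ring *src,
			      unsigned int dst_entries, unsigned int src_entries)
{
	unsigned int tail = READ_ONCE(src->tail);
	unsigned int head = READ_ONCE(src->head);
	unsigned int i;

	for (i = head; i != tail; i++)	/* bounded by an earlier tail - head check */
		dst->entries[i & (dst_entries - 1)] =
			src->entries[i & (src_entries - 1)];

	WRITE_ONCE(dst->head, head);
	WRITE_ONCE(dst->tail, tail);
}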
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 077f84684c18..69937d0c94f9 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -997,7 +997,7 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
dst_node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
if (!dst_node) {
ret = -ENOMEM;
- goto out_put_free;
+ goto out_unlock;
}
refcount_inc(&src_node->buf->refs);
@@ -1033,14 +1033,6 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
mutex_lock(&src_ctx->uring_lock);
/* someone raced setting up buffers, dump ours */
ret = -EBUSY;
-out_put_free:
- i = data.nr;
- while (i--) {
- if (data.nodes[i]) {
- io_buffer_unmap(src_ctx, data.nodes[i]);
- kfree(data.nodes[i]);
- }
- }
out_unlock:
io_rsrc_data_free(ctx, &data);
mutex_unlock(&src_ctx->uring_lock);
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index 9e5bd79fd2b5..8961a3c1e73c 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -268,8 +268,12 @@ static int io_sq_thread(void *data)
DEFINE_WAIT(wait);
/* offload context creation failed, just exit */
- if (!current->io_uring)
+ if (!current->io_uring) {
+ mutex_lock(&sqd->lock);
+ sqd->thread = NULL;
+ mutex_unlock(&sqd->lock);
goto err_out;
+ }
snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
set_task_comm(current, buf);
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 362689b17ccc..e9cec9e4dc2f 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -427,10 +427,12 @@ static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
timeout->off = 0; /* noseq */
data = req->async_data;
+ data->ts = *ts;
+
list_add_tail(&timeout->list, &ctx->timeout_list);
hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
data->timer.function = io_timeout_fn;
- hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
+ hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), mode);
return 0;
}
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index af842e9b4eb9..ce7726a04883 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -16,26 +16,35 @@
#include "rsrc.h"
#include "uring_cmd.h"
-static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
+static struct io_uring_cmd_data *io_uring_async_get(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- struct uring_cache *cache;
+ struct io_uring_cmd_data *cache;
cache = io_alloc_cache_get(&ctx->uring_cache);
if (cache) {
+ cache->op_data = NULL;
req->flags |= REQ_F_ASYNC_DATA;
req->async_data = cache;
return cache;
}
- if (!io_alloc_async_data(req))
- return req->async_data;
+ if (!io_alloc_async_data(req)) {
+ cache = req->async_data;
+ cache->op_data = NULL;
+ return cache;
+ }
return NULL;
}
static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- struct uring_cache *cache = req->async_data;
+ struct io_uring_cmd_data *cache = req->async_data;
+
+ if (cache->op_data) {
+ kfree(cache->op_data);
+ cache->op_data = NULL;
+ }
if (issue_flags & IO_URING_F_UNLOCKED)
return;
@@ -183,7 +192,7 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- struct uring_cache *cache;
+ struct io_uring_cmd_data *cache;
cache = io_uring_async_get(req);
if (unlikely(!cache))
@@ -260,7 +269,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
ret = file->f_op->uring_cmd(ioucmd, issue_flags);
if (ret == -EAGAIN) {
- struct uring_cache *cache = req->async_data;
+ struct io_uring_cmd_data *cache = req->async_data;
if (ioucmd->sqe != (void *) cache)
memcpy(cache, ioucmd->sqe, uring_sqe_size(req->ctx));
diff --git a/io_uring/uring_cmd.h b/io_uring/uring_cmd.h
index 7dba0f1efc58..f6837ee0955b 100644
--- a/io_uring/uring_cmd.h
+++ b/io_uring/uring_cmd.h
@@ -1,9 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-struct uring_cache {
- struct io_uring_sqe sqes[2];
-};
-
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index f321ed515f3a..0f910c828973 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -197,10 +197,8 @@ static struct cpuset top_cpuset = {
/*
* There are two global locks guarding cpuset structures - cpuset_mutex and
- * callback_lock. We also require taking task_lock() when dereferencing a
- * task's cpuset pointer. See "The task_lock() exception", at the end of this
- * comment. The cpuset code uses only cpuset_mutex. Other kernel subsystems
- * can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
+ * callback_lock. The cpuset code uses only cpuset_mutex. Other kernel
+ * subsystems can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
* structures. Note that cpuset_mutex needs to be a mutex as it is used in
* paths that rely on priority inheritance (e.g. scheduler - on RT) for
* correctness.
@@ -229,9 +227,6 @@ static struct cpuset top_cpuset = {
* The cpuset_common_seq_show() handlers only hold callback_lock across
* small pieces of code, such as when reading out possibly multi-word
* cpumasks and nodemasks.
- *
- * Accessing a task's cpuset should be done in accordance with the
- * guidelines for accessing subsystem state in kernel/cgroup.c
*/
static DEFINE_MUTEX(cpuset_mutex);
@@ -890,7 +885,15 @@ v2:
*/
if (cgrpv2) {
for (i = 0; i < ndoms; i++) {
- cpumask_copy(doms[i], csa[i]->effective_cpus);
+ /*
+ * The top cpuset may contain some boot time isolated
+ * CPUs that need to be excluded from the sched domain.
+ */
+ if (csa[i] == &top_cpuset)
+ cpumask_and(doms[i], csa[i]->effective_cpus,
+ housekeeping_cpumask(HK_TYPE_DOMAIN));
+ else
+ cpumask_copy(doms[i], csa[i]->effective_cpus);
if (dattr)
dattr[i] = SD_ATTR_INIT;
}
@@ -3121,29 +3124,6 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
int retval = -ENODEV;
buf = strstrip(buf);
-
- /*
- * CPU or memory hotunplug may leave @cs w/o any execution
- * resources, in which case the hotplug code asynchronously updates
- * configuration and transfers all tasks to the nearest ancestor
- * which can execute.
- *
- * As writes to "cpus" or "mems" may restore @cs's execution
- * resources, wait for the previously scheduled operations before
- * proceeding, so that we don't end up keep removing tasks added
- * after execution capability is restored.
- *
- * cpuset_handle_hotplug may call back into cgroup core asynchronously
- * via cgroup_transfer_tasks() and waiting for it from a cgroupfs
- * operation like this one can lead to a deadlock through kernfs
- * active_ref protection. Let's break the protection. Losing the
- * protection is okay as we check whether @cs is online after
- * grabbing cpuset_mutex anyway. This only happens on the legacy
- * hierarchies.
- */
- css_get(&cs->css);
- kernfs_break_active_protection(of->kn);
-
cpus_read_lock();
mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs))
@@ -3176,8 +3156,6 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
out_unlock:
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
- kernfs_unbreak_active_protection(of->kn);
- css_put(&cs->css);
flush_workqueue(cpuset_migrate_mm_wq);
return retval ?: nbytes;
}
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index fa04b14a7d72..5d71ef85420c 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1915,6 +1915,7 @@ void uprobe_free_utask(struct task_struct *t)
if (!utask)
return;
+ t->utask = NULL;
WARN_ON_ONCE(utask->active_uprobe || utask->xol_vaddr);
timer_delete_sync(&utask->ri_timer);
@@ -1924,7 +1925,6 @@ void uprobe_free_utask(struct task_struct *t)
ri = free_ret_instance(ri, true /* cleanup_hprobe */);
kfree(utask);
- t->utask = NULL;
}
#define RI_TIMER_PERIOD (HZ / 10) /* 100 ms */
diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
index 383fd43ac612..7e1340da5aca 100755
--- a/kernel/gen_kheaders.sh
+++ b/kernel/gen_kheaders.sh
@@ -89,6 +89,7 @@ find $cpio_dir -type f -print0 |
# Create archive and try to normalize metadata for reproducibility.
tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+ --exclude=".__afs*" --exclude=".nfs*" \
--owner=0 --group=0 --sort=name --numeric-owner --mode=u=rw,go=r,a+X \
-I $XZ -cf $tarfile -C $cpio_dir/ . > /dev/null
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 19d2699cf638..19813b387ef9 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -2747,6 +2747,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
{
struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
bool prev_on_scx = prev->sched_class == &ext_sched_class;
+ bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED;
int nr_loops = SCX_DSP_MAX_LOOPS;
lockdep_assert_rq_held(rq);
@@ -2779,8 +2780,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
* See scx_ops_disable_workfn() for the explanation on the
* bypassing test.
*/
- if ((prev->scx.flags & SCX_TASK_QUEUED) &&
- prev->scx.slice && !scx_rq_bypassing(rq)) {
+ if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
rq->scx.flags |= SCX_RQ_BAL_KEEP;
goto has_tasks;
}
@@ -2813,6 +2813,10 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
flush_dispatch_buf(rq);
+ if (prev_on_rq && prev->scx.slice) {
+ rq->scx.flags |= SCX_RQ_BAL_KEEP;
+ goto has_tasks;
+ }
if (rq->scx.local_dsq.nr)
goto has_tasks;
if (consume_global_dsq(rq))
@@ -2838,8 +2842,7 @@ no_tasks:
* Didn't find another task to run. Keep running @prev unless
* %SCX_OPS_ENQ_LAST is in effect.
*/
- if ((prev->scx.flags & SCX_TASK_QUEUED) &&
- (!static_branch_unlikely(&scx_ops_enq_last) ||
+ if (prev_on_rq && (!static_branch_unlikely(&scx_ops_enq_last) ||
scx_rq_bypassing(rq))) {
rq->scx.flags |= SCX_RQ_BAL_KEEP;
goto has_tasks;
@@ -3034,7 +3037,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
*/
if (p->scx.slice && !scx_rq_bypassing(rq)) {
dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
- return;
+ goto switch_class;
}
/*
@@ -3051,6 +3054,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
}
}
+switch_class:
if (next && next->sched_class != &ext_sched_class)
switch_class(rq, next);
}
@@ -3586,16 +3590,8 @@ static void reset_idle_masks(void)
cpumask_copy(idle_masks.smt, cpu_online_mask);
}
-void __scx_update_idle(struct rq *rq, bool idle)
+static void update_builtin_idle(int cpu, bool idle)
{
- int cpu = cpu_of(rq);
-
- if (SCX_HAS_OP(update_idle) && !scx_rq_bypassing(rq)) {
- SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
- if (!static_branch_unlikely(&scx_builtin_idle_enabled))
- return;
- }
-
if (idle)
cpumask_set_cpu(cpu, idle_masks.cpu);
else
@@ -3622,6 +3618,57 @@ void __scx_update_idle(struct rq *rq, bool idle)
#endif
}
+/*
+ * Update the idle state of a CPU to @idle.
+ *
+ * If @do_notify is true, ops.update_idle() is invoked to notify the scx
+ * scheduler of an actual idle state transition (idle to busy or vice
+ * versa). If @do_notify is false, only the idle state in the idle masks is
+ * refreshed without invoking ops.update_idle().
+ *
+ * This distinction is necessary, because an idle CPU can be "reserved" and
+ * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
+ * busy even if no tasks are dispatched. In this case, the CPU may return
+ * to idle without a true state transition. Refreshing the idle masks
+ * without invoking ops.update_idle() ensures accurate idle state tracking
+ * while avoiding unnecessary updates and maintaining balanced state
+ * transitions.
+ */
+void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
+{
+ int cpu = cpu_of(rq);
+
+ lockdep_assert_rq_held(rq);
+
+ /*
+ * Trigger ops.update_idle() only when transitioning from a task to
+ * the idle thread and vice versa.
+ *
+ * Idle transitions are indicated by do_notify being set to true,
+ * managed by put_prev_task_idle()/set_next_task_idle().
+ */
+ if (SCX_HAS_OP(update_idle) && do_notify && !scx_rq_bypassing(rq))
+ SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
+
+ /*
+ * Update the idle masks:
+ * - for real idle transitions (do_notify == true)
+ * - for idle-to-idle transitions (indicated by the previous task
+ * being the idle thread, managed by pick_task_idle())
+ *
+ * Skip updating idle masks if the previous task is not the idle
+ * thread, since set_next_task_idle() has already handled it when
+ * transitioning from a task to the idle thread (calling this
+ * function with do_notify == true).
+ *
+ * In this way we can avoid updating the idle masks twice,
+ * unnecessarily.
+ */
+ if (static_branch_likely(&scx_builtin_idle_enabled))
+ if (do_notify || is_idle_task(rq->curr))
+ update_builtin_idle(cpu, idle);
+}
+
static void handle_hotplug(struct rq *rq, bool online)
{
int cpu = cpu_of(rq);
@@ -4744,10 +4791,9 @@ static void scx_ops_bypass(bool bypass)
*/
for_each_possible_cpu(cpu) {
struct rq *rq = cpu_rq(cpu);
- struct rq_flags rf;
struct task_struct *p, *n;
- rq_lock(rq, &rf);
+ raw_spin_rq_lock(rq);
if (bypass) {
WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
@@ -4763,7 +4809,7 @@ static void scx_ops_bypass(bool bypass)
* sees scx_rq_bypassing() before moving tasks to SCX.
*/
if (!scx_enabled()) {
- rq_unlock(rq, &rf);
+ raw_spin_rq_unlock(rq);
continue;
}
@@ -4783,10 +4829,11 @@ static void scx_ops_bypass(bool bypass)
sched_enq_and_set_task(&ctx);
}
- rq_unlock(rq, &rf);
-
/* resched to restore ticks and idle state */
- resched_cpu(cpu);
+ if (cpu_online(cpu) || cpu == smp_processor_id())
+ resched_curr(rq);
+
+ raw_spin_rq_unlock(rq);
}
atomic_dec(&scx_ops_breather_depth);
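The net effect of the new do_notify parameter can be condensed into a few lines. In the sketch below, notify_scx_scheduler() and refresh_idle_masks() are hypothetical stand-ins for the SCX_CALL_OP(..., update_idle, ...) and update_builtin_idle() paths, and guards such as SCX_HAS_OP(), scx_rq_bypassing() and the static branch are omitted, so this only illustrates the decision logic rather than the actual implementation:

    static void sketch_update_idle(struct rq *rq, bool idle, bool do_notify)
    {
            /* Real task <-> idle transitions (put_prev_task_idle() /
             * set_next_task_idle()) notify the BPF scheduler.
             */
            if (do_notify)
                    notify_scx_scheduler(cpu_of(rq), idle);

            /* Idle masks are refreshed for real transitions and also for
             * idle -> idle re-picks via pick_task_idle(), where rq->curr is
             * already the idle task and do_notify is false.
             */
            if (do_notify || is_idle_task(rq->curr))
                    refresh_idle_masks(cpu_of(rq), idle);
    }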
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
index b1675bb59fc4..4d022d17ac7d 100644
--- a/kernel/sched/ext.h
+++ b/kernel/sched/ext.h
@@ -57,15 +57,15 @@ static inline void init_sched_ext_class(void) {}
#endif /* CONFIG_SCHED_CLASS_EXT */
#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
-void __scx_update_idle(struct rq *rq, bool idle);
+void __scx_update_idle(struct rq *rq, bool idle, bool do_notify);
-static inline void scx_update_idle(struct rq *rq, bool idle)
+static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify)
{
if (scx_enabled())
- __scx_update_idle(rq, idle);
+ __scx_update_idle(rq, idle, do_notify);
}
#else
-static inline void scx_update_idle(struct rq *rq, bool idle) {}
+static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) {}
#endif
#ifdef CONFIG_CGROUP_SCHED
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 621696269584..2c85c86b455f 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -452,19 +452,20 @@ static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
dl_server_update_idle_time(rq, prev);
- scx_update_idle(rq, false);
+ scx_update_idle(rq, false, true);
}
static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
update_idle_core(rq);
- scx_update_idle(rq, true);
+ scx_update_idle(rq, true, true);
schedstat_inc(rq->sched_goidle);
next->se.exec_start = rq_clock_task(rq);
}
struct task_struct *pick_task_idle(struct rq *rq)
{
+ scx_update_idle(rq, true, false);
return rq->idle;
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f8aebcb01e62..b6e40e8791fa 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4122,6 +4122,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
preempt_model_none() ? "server" :
preempt_model_voluntary() ? "desktop" :
preempt_model_full() ? "preempt" :
+ preempt_model_lazy() ? "lazy" :
preempt_model_rt() ? "preempt_rt" :
"unknown",
/* These are reserved for later use */
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index fce064e20570..a4e799c1e767 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -182,6 +182,7 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
struct trace_array_cpu *data;
unsigned long flags;
unsigned int trace_ctx;
+ u64 *calltime;
int ret;
if (ftrace_graph_ignore_func(gops, trace))
@@ -199,6 +200,12 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
if (!func_prolog_dec(tr, &data, &flags))
return 0;
+ calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
+ if (!calltime)
+ return 0;
+
+ *calltime = trace_clock_local();
+
trace_ctx = tracing_gen_ctx_flags(flags);
ret = __trace_graph_entry(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
@@ -213,12 +220,19 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
struct trace_array_cpu *data;
unsigned long flags;
unsigned int trace_ctx;
+ u64 *calltime;
+ int size;
ftrace_graph_addr_finish(gops, trace);
if (!func_prolog_dec(tr, &data, &flags))
return;
+ calltime = fgraph_retrieve_data(gops->idx, &size);
+ if (!calltime)
+ return;
+ trace->calltime = *calltime;
+
trace_ctx = tracing_gen_ctx_flags(flags);
__trace_graph_return(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 935a886af40c..0642ea174849 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -940,8 +940,10 @@ static int __trace_kprobe_create(int argc, const char *argv[])
}
/* a symbol specified */
symbol = kstrdup(argv[1], GFP_KERNEL);
- if (!symbol)
- return -ENOMEM;
+ if (!symbol) {
+ ret = -ENOMEM;
+ goto error;
+ }
tmp = strchr(symbol, '%');
if (tmp) {
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index d6c7f18daa15..c58292e424d5 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -118,6 +118,7 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
unsigned int trace_ctx;
+ u64 *calltime;
int ret = 0;
if (ftrace_graph_ignore_func(gops, trace))
@@ -135,6 +136,12 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return 0;
+ calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
+ if (!calltime)
+ return 0;
+
+ *calltime = trace_clock_local();
+
ret = __trace_graph_entry(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
preempt_enable_notrace();
@@ -148,12 +155,19 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace,
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
unsigned int trace_ctx;
+ u64 *calltime;
+ int size;
ftrace_graph_addr_finish(gops, trace);
if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return;
+ calltime = fgraph_retrieve_data(gops->idx, &size);
+ if (!calltime)
+ return;
+ trace->calltime = *calltime;
+
__trace_graph_return(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f7d8fc204579..9362484a653c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2508,6 +2508,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
return;
}
+ WARN_ON_ONCE(cpu != WORK_CPU_UNBOUND && !cpu_online(cpu));
dwork->wq = wq;
dwork->cpu = cpu;
timer->expires = jiffies + delay;
@@ -2533,6 +2534,12 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
* @dwork: work to queue
* @delay: number of jiffies to wait before queueing
*
+ * We queue the delayed_work to a specific CPU; for non-zero delays the
+ * caller must ensure it is online and can't go away. Callers that fail
+ * to ensure this may get @dwork->timer queued to an offlined CPU, and
+ * this will prevent queueing of @dwork->work unless the offlined CPU
+ * becomes online again.
+ *
* Return: %false if @work was already on a queue, %true otherwise. If
* @delay is zero and @dwork is idle, it will be scheduled for immediate
* execution.
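A hedged usage sketch of what the new WARN_ON_ONCE() expects from callers of queue_delayed_work_on(). queue_dwork_on_online() is a hypothetical wrapper, not part of this patch, and callers still need their own guarantee that the CPU remains online until the timer fires:

    static bool queue_dwork_on_online(int cpu, struct workqueue_struct *wq,
                                      struct delayed_work *dwork,
                                      unsigned long delay)
    {
            bool queued = false;

            cpus_read_lock();               /* CPU cannot go offline while held */
            if (cpu_online(cpu))
                    queued = queue_delayed_work_on(cpu, wq, dwork, delay);
            cpus_read_unlock();

            return queued;
    }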
diff --git a/net/802/psnap.c b/net/802/psnap.c
index fca9d454905f..389df460c8c4 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -55,11 +55,11 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
goto drop;
rcu_read_lock();
- proto = find_snap_client(skb_transport_header(skb));
+ proto = find_snap_client(skb->data);
if (proto) {
/* Pass the frame on. */
- skb->transport_header += 5;
skb_pull_rcsum(skb, 5);
+ skb_reset_transport_header(skb);
rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
}
rcu_read_unlock();
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index c86f4e42e69c..7b2b04d6b856 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -1031,9 +1031,9 @@ static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
{
- /* If we're advertising or initiating an LE connection we can't
- * go ahead and change the random address at this time. This is
- * because the eventual initiator address used for the
+ /* If a random_addr has been set and we're advertising or initiating an
+ * LE connection, we can't go ahead and change the random address at
+ * this time. This is because the eventual initiator address used for the
* subsequently created connection will be undefined (some
* controllers use the new address and others the one we had
* when the operation started).
@@ -1041,8 +1041,9 @@ static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
* In this kind of scenario skip the update and let the random
* address be updated at the next cycle.
*/
- if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
- hci_lookup_le_connect(hdev)) {
+ if (bacmp(&hdev->random_addr, BDADDR_ANY) &&
+ (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
+ hci_lookup_le_connect(hdev))) {
bt_dev_dbg(hdev, "Deferring random address update");
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
return 0;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index b31192d473d0..de47ad999d7b 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -7655,6 +7655,24 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
+static void add_device_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_add_device *cp = cmd->param;
+
+ if (!err) {
+ device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
+ cp->action);
+ device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
+ cp->addr.type, hdev->conn_flags,
+ PTR_UINT(cmd->user_data));
+ }
+
+ mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
+ mgmt_status(err), &cp->addr, sizeof(cp->addr));
+ mgmt_pending_free(cmd);
+}
+
static int add_device_sync(struct hci_dev *hdev, void *data)
{
return hci_update_passive_scan_sync(hdev);
@@ -7663,6 +7681,7 @@ static int add_device_sync(struct hci_dev *hdev, void *data)
static int add_device(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
+ struct mgmt_pending_cmd *cmd;
struct mgmt_cp_add_device *cp = data;
u8 auto_conn, addr_type;
struct hci_conn_params *params;
@@ -7743,9 +7762,24 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
current_flags = params->flags;
}
- err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
- if (err < 0)
+ cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
goto unlock;
+ }
+
+ cmd->user_data = UINT_PTR(current_flags);
+
+ err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
+ add_device_complete);
+ if (err < 0) {
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+ MGMT_STATUS_FAILED, &cp->addr,
+ sizeof(cp->addr));
+ mgmt_pending_free(cmd);
+ }
+
+ goto unlock;
added:
device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index af80d599c337..21a5b5535ebc 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -201,14 +201,14 @@ static ssize_t address_show(struct device *tty_dev,
struct device_attribute *attr, char *buf)
{
struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
- return sprintf(buf, "%pMR\n", &dev->dst);
+ return sysfs_emit(buf, "%pMR\n", &dev->dst);
}
static ssize_t channel_show(struct device *tty_dev,
struct device_attribute *attr, char *buf)
{
struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
- return sprintf(buf, "%d\n", dev->channel);
+ return sysfs_emit(buf, "%d\n", dev->channel);
}
static DEVICE_ATTR_RO(address);
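For context on the sprintf() -> sysfs_emit() conversion: sysfs_emit() behaves like scnprintf(buf, PAGE_SIZE, ...) but additionally sanity-checks that it is writing into the page-sized buffer sysfs hands to show() callbacks. A minimal sketch of the recommended shape (example_show() and the attribute name are made up for illustration):

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            /* Never overruns the sysfs page and returns the emitted length. */
            return sysfs_emit(buf, "%d\n", 42);
    }
    static DEVICE_ATTR_RO(example);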
diff --git a/net/core/dev.c b/net/core/dev.c
index faa23042df38..a9f62f5aeb84 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -753,6 +753,36 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);
+/* must be called under rcu_read_lock(), as we don't take a reference */
+static struct napi_struct *napi_by_id(unsigned int napi_id)
+{
+ unsigned int hash = napi_id % HASH_SIZE(napi_hash);
+ struct napi_struct *napi;
+
+ hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
+ if (napi->napi_id == napi_id)
+ return napi;
+
+ return NULL;
+}
+
+/* must be called under rcu_read_lock(), as we don't take a reference */
+struct napi_struct *netdev_napi_by_id(struct net *net, unsigned int napi_id)
+{
+ struct napi_struct *napi;
+
+ napi = napi_by_id(napi_id);
+ if (!napi)
+ return NULL;
+
+ if (WARN_ON_ONCE(!napi->dev))
+ return NULL;
+ if (!net_eq(net, dev_net(napi->dev)))
+ return NULL;
+
+ return napi;
+}
+
/**
* __dev_get_by_name - find a device by its name
* @net: the applicable net namespace
@@ -6293,19 +6323,6 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
}
EXPORT_SYMBOL(napi_complete_done);
-/* must be called under rcu_read_lock(), as we dont take a reference */
-struct napi_struct *napi_by_id(unsigned int napi_id)
-{
- unsigned int hash = napi_id % HASH_SIZE(napi_hash);
- struct napi_struct *napi;
-
- hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
- if (napi->napi_id == napi_id)
- return napi;
-
- return NULL;
-}
-
static void skb_defer_free_flush(struct softnet_data *sd)
{
struct sk_buff *skb, *next;
diff --git a/net/core/dev.h b/net/core/dev.h
index d043dee25a68..deb5eae5749f 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -22,6 +22,8 @@ struct sd_flow_limit {
extern int netdev_flow_limit_table_len;
+struct napi_struct *netdev_napi_by_id(struct net *net, unsigned int napi_id);
+
#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
@@ -269,7 +271,6 @@ void xdp_do_check_flushed(struct napi_struct *napi);
static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
#endif
-struct napi_struct *napi_by_id(unsigned int napi_id);
void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
#define XMIT_RECURSION_LIMIT 8
diff --git a/net/core/filter.c b/net/core/filter.c
index 834614071727..2fb45a86f3dd 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -11251,6 +11251,7 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
struct sock_reuseport *reuse;
struct sock *selected_sk;
+ int err;
selected_sk = map->ops->map_lookup_elem(map, key);
if (!selected_sk)
@@ -11258,10 +11259,6 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
if (!reuse) {
- /* Lookup in sock_map can return TCP ESTABLISHED sockets. */
- if (sk_is_refcounted(selected_sk))
- sock_put(selected_sk);
-
/* reuseport_array has only sk with non NULL sk_reuseport_cb.
* The only (!reuse) case here is - the sk has already been
* unhashed (e.g. by close()), so treat it as -ENOENT.
@@ -11269,24 +11266,33 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
* Other maps (e.g. sock_map) do not provide this guarantee and
* the sk may never be in the reuseport group to begin with.
*/
- return is_sockarray ? -ENOENT : -EINVAL;
+ err = is_sockarray ? -ENOENT : -EINVAL;
+ goto error;
}
if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
struct sock *sk = reuse_kern->sk;
- if (sk->sk_protocol != selected_sk->sk_protocol)
- return -EPROTOTYPE;
- else if (sk->sk_family != selected_sk->sk_family)
- return -EAFNOSUPPORT;
-
- /* Catch all. Likely bound to a different sockaddr. */
- return -EBADFD;
+ if (sk->sk_protocol != selected_sk->sk_protocol) {
+ err = -EPROTOTYPE;
+ } else if (sk->sk_family != selected_sk->sk_family) {
+ err = -EAFNOSUPPORT;
+ } else {
+ /* Catch all. Likely bound to a different sockaddr. */
+ err = -EBADFD;
+ }
+ goto error;
}
reuse_kern->selected_sk = selected_sk;
return 0;
+error:
+ /* Lookup in sock_map can return TCP ESTABLISHED sockets. */
+ if (sk_is_refcounted(selected_sk))
+ sock_put(selected_sk);
+
+ return err;
}
static const struct bpf_func_proto sk_select_reuseport_proto = {
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 1b4d39e38084..cb04ef2b9807 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -42,14 +42,18 @@ static unsigned int default_operstate(const struct net_device *dev)
* first check whether lower is indeed the source of its down state.
*/
if (!netif_carrier_ok(dev)) {
- int iflink = dev_get_iflink(dev);
struct net_device *peer;
+ int iflink;
/* If called from netdev_run_todo()/linkwatch_sync_dev(),
* dev_net(dev) can be already freed, and RTNL is not held.
*/
- if (dev->reg_state == NETREG_UNREGISTERED ||
- iflink == dev->ifindex)
+ if (dev->reg_state <= NETREG_REGISTERED)
+ iflink = dev_get_iflink(dev);
+ else
+ iflink = dev->ifindex;
+
+ if (iflink == dev->ifindex)
return IF_OPER_DOWN;
ASSERT_RTNL();
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index a89cbd8d87c3..996ac6a449eb 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -197,6 +197,16 @@ static const struct genl_multicast_group netdev_nl_mcgrps[] = {
[NETDEV_NLGRP_PAGE_POOL] = { "page-pool", },
};
+static void __netdev_nl_sock_priv_init(void *priv)
+{
+ netdev_nl_sock_priv_init(priv);
+}
+
+static void __netdev_nl_sock_priv_destroy(void *priv)
+{
+ netdev_nl_sock_priv_destroy(priv);
+}
+
struct genl_family netdev_nl_family __ro_after_init = {
.name = NETDEV_FAMILY_NAME,
.version = NETDEV_FAMILY_VERSION,
@@ -208,6 +218,6 @@ struct genl_family netdev_nl_family __ro_after_init = {
.mcgrps = netdev_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(netdev_nl_mcgrps),
.sock_priv_size = sizeof(struct list_head),
- .sock_priv_init = (void *)netdev_nl_sock_priv_init,
- .sock_priv_destroy = (void *)netdev_nl_sock_priv_destroy,
+ .sock_priv_init = __netdev_nl_sock_priv_init,
+ .sock_priv_destroy = __netdev_nl_sock_priv_destroy,
};
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index b0772d135efb..a3bdaf075b6b 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -167,8 +167,6 @@ netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
void *hdr;
pid_t pid;
- if (WARN_ON_ONCE(!napi->dev))
- return -EINVAL;
if (!(napi->dev->flags & IFF_UP))
return 0;
@@ -176,8 +174,7 @@ netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
if (!hdr)
return -EMSGSIZE;
- if (napi->napi_id >= MIN_NAPI_ID &&
- nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
+ if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
goto nla_put_failure;
if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
@@ -235,7 +232,7 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
rcu_read_lock();
- napi = napi_by_id(napi_id);
+ napi = netdev_napi_by_id(genl_info_net(info), napi_id);
if (napi) {
err = netdev_nl_napi_fill_one(rsp, napi, info);
} else {
@@ -272,6 +269,8 @@ netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
return err;
list_for_each_entry(napi, &netdev->napi_list, dev_list) {
+ if (napi->napi_id < MIN_NAPI_ID)
+ continue;
if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
continue;
@@ -354,7 +353,7 @@ int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
rcu_read_lock();
- napi = napi_by_id(napi_id);
+ napi = netdev_napi_by_id(genl_info_net(info), napi_id);
if (napi) {
err = netdev_nl_napi_set_config(napi, info);
} else {
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2e459b9d88eb..96a6ed37d4cc 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -627,6 +627,8 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
const struct net_device_ops *ops;
int err;
+ skb_queue_head_init(&np->skb_pool);
+
if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
np_err(np, "%s doesn't support polling, aborting\n",
ndev->name);
@@ -662,6 +664,9 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
strscpy(np->dev_name, ndev->name, IFNAMSIZ);
npinfo->netpoll = np;
+ /* fill up the skb queue */
+ refill_skbs(np);
+
/* last thing to do is link it to the net device structure */
rcu_assign_pointer(ndev->npinfo, npinfo);
@@ -681,8 +686,6 @@ int netpoll_setup(struct netpoll *np)
struct in_device *in_dev;
int err;
- skb_queue_head_init(&np->skb_pool);
-
rtnl_lock();
if (np->dev_name[0]) {
struct net *net = current->nsproxy->net_ns;
@@ -782,9 +785,6 @@ put_noaddr:
}
}
- /* fill up the skb queue */
- refill_skbs(np);
-
err = __netpoll_setup(np, ndev);
if (err)
goto flush;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 7e23cacbe66e..4cb547fae91f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -851,6 +851,9 @@ static ssize_t get_imix_entries(const char __user *buffer,
unsigned long weight;
unsigned long size;
+ if (pkt_dev->n_imix_entries >= MAX_IMIX_ENTRIES)
+ return -E2BIG;
+
len = num_arg(&buffer[i], max_digits, &size);
if (len < 0)
return len;
@@ -880,9 +883,6 @@ static ssize_t get_imix_entries(const char __user *buffer,
i++;
pkt_dev->n_imix_entries++;
-
- if (pkt_dev->n_imix_entries > MAX_IMIX_ENTRIES)
- return -E2BIG;
} while (c == ' ');
return i;
diff --git a/net/core/xdp.c b/net/core/xdp.c
index bcc5551c6424..2315feed94ef 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -186,7 +186,6 @@ int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
xdp_rxq_info_init(xdp_rxq);
xdp_rxq->dev = dev;
xdp_rxq->queue_index = queue_index;
- xdp_rxq->napi_id = napi_id;
xdp_rxq->frag_size = frag_size;
xdp_rxq->reg_state = REG_STATE_REGISTERED;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 0fbec3509618..e1564b95fab0 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2445,6 +2445,7 @@ martian_destination:
net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
&daddr, &saddr, dev->name);
#endif
+ goto out;
e_nobufs:
reason = SKB_DROP_REASON_NOMEM;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a38c8b1f44db..c26f6c4b7bb4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -896,7 +896,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb,
sock_net_set(ctl_sk, net);
if (sk) {
ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
- inet_twsk(sk)->tw_mark : sk->sk_mark;
+ inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark);
ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
transmit_time = tcp_transmit_time(sk);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e8953e88efef..86d282618515 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -533,7 +533,7 @@ begin:
return NULL;
}
-/* In hash4, rehash can happen in connect(), where hash4_cnt keeps unchanged. */
+/* udp_rehash4() only checks hslot4, and hash4_cnt is not processed. */
static void udp_rehash4(struct udp_table *udptable, struct sock *sk,
u16 newhash4)
{
@@ -582,15 +582,13 @@ void udp_lib_hash4(struct sock *sk, u16 hash)
struct net *net = sock_net(sk);
struct udp_table *udptable;
- /* Connected udp socket can re-connect to another remote address,
- * so rehash4 is needed.
+ /* Connected udp socket can re-connect to another remote address, which
+ * will be handled by rehash. Thus no need to redo hash4 here.
*/
- udptable = net->ipv4.udp_table;
- if (udp_hashed4(sk)) {
- udp_rehash4(udptable, sk, hash);
+ if (udp_hashed4(sk))
return;
- }
+ udptable = net->ipv4.udp_table;
hslot = udp_hashslot(udptable, net, udp_sk(sk)->udp_port_hash);
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
hslot4 = udp_hashslot4(udptable, hash);
@@ -2173,14 +2171,14 @@ void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4)
struct udp_table *udptable = udp_get_table_prot(sk);
struct udp_hslot *hslot, *hslot2, *nhslot2;
+ hslot = udp_hashslot(udptable, sock_net(sk),
+ udp_sk(sk)->udp_port_hash);
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
nhslot2 = udp_hashslot2(udptable, newhash);
udp_sk(sk)->udp_portaddr_hash = newhash;
if (hslot2 != nhslot2 ||
rcu_access_pointer(sk->sk_reuseport_cb)) {
- hslot = udp_hashslot(udptable, sock_net(sk),
- udp_sk(sk)->udp_port_hash);
/* we must lock primary chain too */
spin_lock_bh(&hslot->lock);
if (rcu_access_pointer(sk->sk_reuseport_cb))
@@ -2199,19 +2197,29 @@ void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4)
spin_unlock(&nhslot2->lock);
}
- if (udp_hashed4(sk)) {
- udp_rehash4(udptable, sk, newhash4);
+ spin_unlock_bh(&hslot->lock);
+ }
+
+ /* Now process hash4 if necessary:
+ * (1) update hslot4;
+ * (2) update hslot2->hash4_cnt.
+ * Note that hslot2/hslot4 should be checked separately, as
+ * either of them may change with the other unchanged.
+ */
+ if (udp_hashed4(sk)) {
+ spin_lock_bh(&hslot->lock);
- if (hslot2 != nhslot2) {
- spin_lock(&hslot2->lock);
- udp_hash4_dec(hslot2);
- spin_unlock(&hslot2->lock);
+ udp_rehash4(udptable, sk, newhash4);
+ if (hslot2 != nhslot2) {
+ spin_lock(&hslot2->lock);
+ udp_hash4_dec(hslot2);
+ spin_unlock(&hslot2->lock);
- spin_lock(&nhslot2->lock);
- udp_hash4_inc(nhslot2);
- spin_unlock(&nhslot2->lock);
- }
+ spin_lock(&nhslot2->lock);
+ udp_hash4_inc(nhslot2);
+ spin_unlock(&nhslot2->lock);
}
+
spin_unlock_bh(&hslot->lock);
}
}
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index c0e2da5072be..9e4631fade90 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -684,6 +684,10 @@ void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata)
ASSERT_RTNL();
mutex_lock(&sdata->local->iflist_mtx);
+ if (list_empty(&sdata->local->interfaces)) {
+ mutex_unlock(&sdata->local->iflist_mtx);
+ return;
+ }
list_del_rcu(&sdata->list);
mutex_unlock(&sdata->local->iflist_mtx);
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index 38d8121331d4..b0dd008e2114 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -102,16 +102,15 @@ static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
}
#ifdef CONFIG_SYSCTL
-static int mptcp_set_scheduler(const struct net *net, const char *name)
+static int mptcp_set_scheduler(char *scheduler, const char *name)
{
- struct mptcp_pernet *pernet = mptcp_get_pernet(net);
struct mptcp_sched_ops *sched;
int ret = 0;
rcu_read_lock();
sched = mptcp_sched_find(name);
if (sched)
- strscpy(pernet->scheduler, name, MPTCP_SCHED_NAME_MAX);
+ strscpy(scheduler, name, MPTCP_SCHED_NAME_MAX);
else
ret = -ENOENT;
rcu_read_unlock();
@@ -122,7 +121,7 @@ static int mptcp_set_scheduler(const struct net *net, const char *name)
static int proc_scheduler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- const struct net *net = current->nsproxy->net_ns;
+ char (*scheduler)[MPTCP_SCHED_NAME_MAX] = ctl->data;
char val[MPTCP_SCHED_NAME_MAX];
struct ctl_table tbl = {
.data = val,
@@ -130,11 +129,11 @@ static int proc_scheduler(const struct ctl_table *ctl, int write,
};
int ret;
- strscpy(val, mptcp_get_scheduler(net), MPTCP_SCHED_NAME_MAX);
+ strscpy(val, *scheduler, MPTCP_SCHED_NAME_MAX);
ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
if (write && ret == 0)
- ret = mptcp_set_scheduler(net, val);
+ ret = mptcp_set_scheduler(*scheduler, val);
return ret;
}
@@ -161,7 +160,9 @@ static int proc_blackhole_detect_timeout(const struct ctl_table *table,
int write, void *buffer, size_t *lenp,
loff_t *ppos)
{
- struct mptcp_pernet *pernet = mptcp_get_pernet(current->nsproxy->net_ns);
+ struct mptcp_pernet *pernet = container_of(table->data,
+ struct mptcp_pernet,
+ blackhole_timeout);
int ret;
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
@@ -228,7 +229,7 @@ static struct ctl_table mptcp_sysctl_table[] = {
{
.procname = "available_schedulers",
.maxlen = MPTCP_SCHED_BUF_MAX,
- .mode = 0644,
+ .mode = 0444,
.proc_handler = proc_available_schedulers,
},
{
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index a62bc874bf1e..123f3f297284 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -607,7 +607,6 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
}
opts->ext_copy.use_ack = 1;
opts->suboptions = OPTION_MPTCP_DSS;
- WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk));
/* Add kind/length/subtype/flag overhead if mapping is not populated */
if (dss_size == 0)
@@ -1288,7 +1287,7 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
}
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICT);
}
- return;
+ goto update_wspace;
}
if (rcv_wnd_new != rcv_wnd_old) {
@@ -1313,6 +1312,9 @@ raise_win:
th->window = htons(new_win);
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDSHARED);
}
+
+update_wspace:
+ WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
}
__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index a93e661ef5c4..73526f1d768f 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -760,10 +760,15 @@ static inline u64 mptcp_data_avail(const struct mptcp_sock *msk)
static inline bool mptcp_epollin_ready(const struct sock *sk)
{
+ u64 data_avail = mptcp_data_avail(mptcp_sk(sk));
+
+ if (!data_avail)
+ return false;
+
/* mptcp doesn't have to deal with small skbs in the receive queue,
- * at it can always coalesce them
+ * as it can always coalesce them
*/
- return (mptcp_data_avail(mptcp_sk(sk)) >= sk->sk_rcvlowat) ||
+ return (data_avail >= sk->sk_rcvlowat) ||
(mem_cgroup_sockets_enabled && sk->sk_memcg &&
mem_cgroup_under_socket_pressure(sk->sk_memcg)) ||
READ_ONCE(tcp_memory_pressure);
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index ef0f8f73826f..4e0842df5234 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -289,6 +289,7 @@ enum {
ncsi_dev_state_config_sp = 0x0301,
ncsi_dev_state_config_cis,
ncsi_dev_state_config_oem_gma,
+ ncsi_dev_state_config_apply_mac,
ncsi_dev_state_config_clear_vids,
ncsi_dev_state_config_svf,
ncsi_dev_state_config_ev,
@@ -322,6 +323,7 @@ struct ncsi_dev_priv {
#define NCSI_DEV_RESHUFFLE 4
#define NCSI_DEV_RESET 8 /* Reset state of NC */
unsigned int gma_flag; /* OEM GMA flag */
+ struct sockaddr pending_mac; /* MAC address received from GMA */
spinlock_t lock; /* Protect the NCSI device */
unsigned int package_probe_id;/* Current ID during probe */
unsigned int package_num; /* Number of packages */
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 5cf55bde366d..bf276eaf9330 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -1038,7 +1038,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
: ncsi_dev_state_config_clear_vids;
break;
case ncsi_dev_state_config_oem_gma:
- nd->state = ncsi_dev_state_config_clear_vids;
+ nd->state = ncsi_dev_state_config_apply_mac;
nca.package = np->id;
nca.channel = nc->id;
@@ -1050,10 +1050,22 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
nca.type = NCSI_PKT_CMD_OEM;
ret = ncsi_gma_handler(&nca, nc->version.mf_id);
}
- if (ret < 0)
+ if (ret < 0) {
+ nd->state = ncsi_dev_state_config_clear_vids;
schedule_work(&ndp->work);
+ }
break;
+ case ncsi_dev_state_config_apply_mac:
+ rtnl_lock();
+ ret = dev_set_mac_address(dev, &ndp->pending_mac, NULL);
+ rtnl_unlock();
+ if (ret < 0)
+ netdev_warn(dev, "NCSI: Writing MAC address to device failed\n");
+
+ nd->state = ncsi_dev_state_config_clear_vids;
+
+ fallthrough;
case ncsi_dev_state_config_clear_vids:
case ncsi_dev_state_config_svf:
case ncsi_dev_state_config_ev:
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index e28be33bdf2c..14bd66909ca4 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -628,16 +628,14 @@ static int ncsi_rsp_handler_snfc(struct ncsi_request *nr)
static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id)
{
struct ncsi_dev_priv *ndp = nr->ndp;
+ struct sockaddr *saddr = &ndp->pending_mac;
struct net_device *ndev = ndp->ndev.dev;
struct ncsi_rsp_oem_pkt *rsp;
- struct sockaddr saddr;
u32 mac_addr_off = 0;
- int ret = 0;
/* Get the response header */
rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
- saddr.sa_family = ndev->type;
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
if (mfr_id == NCSI_OEM_MFR_BCM_ID)
mac_addr_off = BCM_MAC_ADDR_OFFSET;
@@ -646,22 +644,17 @@ static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id)
else if (mfr_id == NCSI_OEM_MFR_INTEL_ID)
mac_addr_off = INTEL_MAC_ADDR_OFFSET;
- memcpy(saddr.sa_data, &rsp->data[mac_addr_off], ETH_ALEN);
+ saddr->sa_family = ndev->type;
+ memcpy(saddr->sa_data, &rsp->data[mac_addr_off], ETH_ALEN);
if (mfr_id == NCSI_OEM_MFR_BCM_ID || mfr_id == NCSI_OEM_MFR_INTEL_ID)
- eth_addr_inc((u8 *)saddr.sa_data);
- if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
+ eth_addr_inc((u8 *)saddr->sa_data);
+ if (!is_valid_ether_addr((const u8 *)saddr->sa_data))
return -ENXIO;
/* Set the flag for GMA command which should only be called once */
ndp->gma_flag = 1;
- rtnl_lock();
- ret = dev_set_mac_address(ndev, &saddr, NULL);
- rtnl_unlock();
- if (ret < 0)
- netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
-
- return ret;
+ return 0;
}
/* Response handler for Mellanox card */
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 9db3e2b0b1c3..456446d7af20 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2517,12 +2517,15 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
struct hlist_nulls_head *hash;
unsigned int nr_slots, i;
- if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+ if (*sizep > (INT_MAX / sizeof(struct hlist_nulls_head)))
return NULL;
BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+ if (nr_slots > (INT_MAX / sizeof(struct hlist_nulls_head)))
+ return NULL;
+
hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
if (hash && nulls)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 0b9f1e8dfe49..c4af283356e7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -8822,6 +8822,7 @@ static void nft_unregister_flowtable_hook(struct net *net,
}
static void __nft_unregister_flowtable_net_hooks(struct net *net,
+ struct nft_flowtable *flowtable,
struct list_head *hook_list,
bool release_netdev)
{
@@ -8829,6 +8830,8 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net,
list_for_each_entry_safe(hook, next, hook_list, list) {
nf_unregister_net_hook(net, &hook->ops);
+ flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
+ FLOW_BLOCK_UNBIND);
if (release_netdev) {
list_del(&hook->list);
kfree_rcu(hook, rcu);
@@ -8837,9 +8840,10 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net,
}
static void nft_unregister_flowtable_net_hooks(struct net *net,
+ struct nft_flowtable *flowtable,
struct list_head *hook_list)
{
- __nft_unregister_flowtable_net_hooks(net, hook_list, false);
+ __nft_unregister_flowtable_net_hooks(net, flowtable, hook_list, false);
}
static int nft_register_flowtable_net_hooks(struct net *net,
@@ -9481,8 +9485,6 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
flowtable->data.type->free(&flowtable->data);
list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
- flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
- FLOW_BLOCK_UNBIND);
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
}
@@ -10870,6 +10872,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
&nft_trans_flowtable_hooks(trans),
trans->msg_type);
nft_unregister_flowtable_net_hooks(net,
+ nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans));
} else {
list_del_rcu(&nft_trans_flowtable(trans)->list);
@@ -10878,6 +10881,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
NULL,
trans->msg_type);
nft_unregister_flowtable_net_hooks(net,
+ nft_trans_flowtable(trans),
&nft_trans_flowtable(trans)->hook_list);
}
break;
@@ -11140,11 +11144,13 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
case NFT_MSG_NEWFLOWTABLE:
if (nft_trans_flowtable_update(trans)) {
nft_unregister_flowtable_net_hooks(net,
+ nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans));
} else {
nft_use_dec_restore(&table->use);
list_del_rcu(&nft_trans_flowtable(trans)->list);
nft_unregister_flowtable_net_hooks(net,
+ nft_trans_flowtable(trans),
&nft_trans_flowtable(trans)->hook_list);
}
break;
@@ -11737,7 +11743,8 @@ static void __nft_release_hook(struct net *net, struct nft_table *table)
list_for_each_entry(chain, &table->chains, list)
__nf_tables_unregister_hook(net, table, chain, true);
list_for_each_entry(flowtable, &table->flowtables, list)
- __nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list,
+ __nft_unregister_flowtable_net_hooks(net, flowtable,
+ &flowtable->hook_list,
true);
}
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 16e260014684..704c858cf209 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -934,7 +934,9 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
{
struct vport *vport = ovs_vport_rcu(dp, out_port);
- if (likely(vport && netif_carrier_ok(vport->dev))) {
+ if (likely(vport &&
+ netif_running(vport->dev) &&
+ netif_carrier_ok(vport->dev))) {
u16 mru = OVS_CB(skb)->mru;
u32 cutlen = OVS_CB(skb)->cutlen;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 351ac1747224..0581c53e6517 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -61,8 +61,10 @@ static atomic_t rds_tcp_unloading = ATOMIC_INIT(0);
static struct kmem_cache *rds_tcp_conn_slab;
-static int rds_tcp_skbuf_handler(const struct ctl_table *ctl, int write,
- void *buffer, size_t *lenp, loff_t *fpos);
+static int rds_tcp_sndbuf_handler(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *fpos);
+static int rds_tcp_rcvbuf_handler(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *fpos);
static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;
@@ -74,7 +76,7 @@ static struct ctl_table rds_tcp_sysctl_table[] = {
/* data is per-net pointer */
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = rds_tcp_skbuf_handler,
+ .proc_handler = rds_tcp_sndbuf_handler,
.extra1 = &rds_tcp_min_sndbuf,
},
#define RDS_TCP_RCVBUF 1
@@ -83,7 +85,7 @@ static struct ctl_table rds_tcp_sysctl_table[] = {
/* data is per-net pointer */
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = rds_tcp_skbuf_handler,
+ .proc_handler = rds_tcp_rcvbuf_handler,
.extra1 = &rds_tcp_min_rcvbuf,
},
};
@@ -682,10 +684,10 @@ static void rds_tcp_sysctl_reset(struct net *net)
spin_unlock_irq(&rds_tcp_conn_lock);
}
-static int rds_tcp_skbuf_handler(const struct ctl_table *ctl, int write,
+static int rds_tcp_skbuf_handler(struct rds_tcp_net *rtn,
+ const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *fpos)
{
- struct net *net = current->nsproxy->net_ns;
int err;
err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
@@ -694,11 +696,34 @@ static int rds_tcp_skbuf_handler(const struct ctl_table *ctl, int write,
*(int *)(ctl->extra1));
return err;
}
- if (write)
+
+ if (write && rtn->rds_tcp_listen_sock && rtn->rds_tcp_listen_sock->sk) {
+ struct net *net = sock_net(rtn->rds_tcp_listen_sock->sk);
+
rds_tcp_sysctl_reset(net);
+ }
+
return 0;
}
+static int rds_tcp_sndbuf_handler(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *fpos)
+{
+ struct rds_tcp_net *rtn = container_of(ctl->data, struct rds_tcp_net,
+ sndbuf_size);
+
+ return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);
+}
+
+static int rds_tcp_rcvbuf_handler(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *fpos)
+{
+ struct rds_tcp_net *rtn = container_of(ctl->data, struct rds_tcp_net,
+ rcvbuf_size);
+
+ return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);
+}
+
static void rds_tcp_exit(void)
{
rds_tcp_set_unloading();
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 5502998aace7..5c2580a07530 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -356,7 +356,8 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
[TCA_FLOW_KEYS] = { .type = NLA_U32 },
[TCA_FLOW_MODE] = { .type = NLA_U32 },
[TCA_FLOW_BASECLASS] = { .type = NLA_U32 },
- [TCA_FLOW_RSHIFT] = { .type = NLA_U32 },
+ [TCA_FLOW_RSHIFT] = NLA_POLICY_MAX(NLA_U32,
+ 31 /* BITS_PER_U32 - 1 */),
[TCA_FLOW_ADDEND] = { .type = NLA_U32 },
[TCA_FLOW_MASK] = { .type = NLA_U32 },
[TCA_FLOW_XOR] = { .type = NLA_U32 },
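The tightened policy lets the netlink core reject out-of-range values before the classifier ever sees them; the cap at 31 presumably exists because right-shifting a u32 by 32 or more is undefined behaviour. A sketch of the same pattern for a hypothetical attribute (TCA_EXAMPLE_SHIFT and TCA_EXAMPLE_MAX are invented for illustration):

    static const struct nla_policy example_policy[TCA_EXAMPLE_MAX + 1] = {
            /* Reject anything that could not be a valid u32 shift count. */
            [TCA_EXAMPLE_SHIFT] = NLA_POLICY_MAX(NLA_U32, 31),
    };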
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 8d8b2db4653c..2c2e2a67f3b2 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -627,6 +627,63 @@ static bool cake_ddst(int flow_mode)
return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
}
+static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ if (likely(cake_dsrc(flow_mode) &&
+ q->hosts[flow->srchost].srchost_bulk_flow_count))
+ q->hosts[flow->srchost].srchost_bulk_flow_count--;
+}
+
+static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ if (likely(cake_dsrc(flow_mode) &&
+ q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
+ q->hosts[flow->srchost].srchost_bulk_flow_count++;
+}
+
+static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ if (likely(cake_ddst(flow_mode) &&
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count))
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
+}
+
+static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ if (likely(cake_ddst(flow_mode) &&
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
+}
+
+static u16 cake_get_flow_quantum(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ u16 host_load = 1;
+
+ if (cake_dsrc(flow_mode))
+ host_load = max(host_load,
+ q->hosts[flow->srchost].srchost_bulk_flow_count);
+
+ if (cake_ddst(flow_mode))
+ host_load = max(host_load,
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count);
+
+ /* The get_random_u16() is a way to apply dithering to avoid
+ * accumulating roundoff errors
+ */
+ return (q->flow_quantum * quantum_div[host_load] +
+ get_random_u16()) >> 16;
+}
+
static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
int flow_mode, u16 flow_override, u16 host_override)
{
@@ -773,10 +830,8 @@ skip_hash:
allocate_dst = cake_ddst(flow_mode);
if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
- if (allocate_src)
- q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
- if (allocate_dst)
- q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+ cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+ cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
}
found:
/* reserve queue for future packets in same flow */
@@ -801,9 +856,10 @@ found:
q->hosts[outer_hash + k].srchost_tag = srchost_hash;
found_src:
srchost_idx = outer_hash + k;
- if (q->flows[reduced_hash].set == CAKE_SET_BULK)
- q->hosts[srchost_idx].srchost_bulk_flow_count++;
q->flows[reduced_hash].srchost = srchost_idx;
+
+ if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+ cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
}
if (allocate_dst) {
@@ -824,9 +880,10 @@ found_src:
q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
found_dst:
dsthost_idx = outer_hash + k;
- if (q->flows[reduced_hash].set == CAKE_SET_BULK)
- q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
q->flows[reduced_hash].dsthost = dsthost_idx;
+
+ if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+ cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
}
}
@@ -1839,10 +1896,6 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* flowchain */
if (!flow->set || flow->set == CAKE_SET_DECAYING) {
- struct cake_host *srchost = &b->hosts[flow->srchost];
- struct cake_host *dsthost = &b->hosts[flow->dsthost];
- u16 host_load = 1;
-
if (!flow->set) {
list_add_tail(&flow->flowchain, &b->new_flows);
} else {
@@ -1852,18 +1905,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
flow->set = CAKE_SET_SPARSE;
b->sparse_flow_count++;
- if (cake_dsrc(q->flow_mode))
- host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
- if (cake_ddst(q->flow_mode))
- host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
- flow->deficit = (b->flow_quantum *
- quantum_div[host_load]) >> 16;
+ flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
- struct cake_host *srchost = &b->hosts[flow->srchost];
- struct cake_host *dsthost = &b->hosts[flow->dsthost];
-
/* this flow was empty, accounted as a sparse flow, but actually
* in the bulk rotation.
*/
@@ -1871,12 +1914,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
b->sparse_flow_count--;
b->bulk_flow_count++;
- if (cake_dsrc(q->flow_mode))
- srchost->srchost_bulk_flow_count++;
-
- if (cake_ddst(q->flow_mode))
- dsthost->dsthost_bulk_flow_count++;
-
+ cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
}
if (q->buffer_used > q->buffer_max_used)
@@ -1933,13 +1972,11 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
{
struct cake_sched_data *q = qdisc_priv(sch);
struct cake_tin_data *b = &q->tins[q->cur_tin];
- struct cake_host *srchost, *dsthost;
ktime_t now = ktime_get();
struct cake_flow *flow;
struct list_head *head;
bool first_flow = true;
struct sk_buff *skb;
- u16 host_load;
u64 delay;
u32 len;
@@ -2039,11 +2076,6 @@ retry:
q->cur_flow = flow - b->flows;
first_flow = false;
- /* triple isolation (modified DRR++) */
- srchost = &b->hosts[flow->srchost];
- dsthost = &b->hosts[flow->dsthost];
- host_load = 1;
-
/* flow isolation (DRR++) */
if (flow->deficit <= 0) {
/* Keep all flows with deficits out of the sparse and decaying
@@ -2055,11 +2087,8 @@ retry:
b->sparse_flow_count--;
b->bulk_flow_count++;
- if (cake_dsrc(q->flow_mode))
- srchost->srchost_bulk_flow_count++;
-
- if (cake_ddst(q->flow_mode))
- dsthost->dsthost_bulk_flow_count++;
+ cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
flow->set = CAKE_SET_BULK;
} else {
@@ -2071,19 +2100,7 @@ retry:
}
}
- if (cake_dsrc(q->flow_mode))
- host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
- if (cake_ddst(q->flow_mode))
- host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
- WARN_ON(host_load > CAKE_QUEUES);
-
- /* The get_random_u16() is a way to apply dithering to avoid
- * accumulating roundoff errors
- */
- flow->deficit += (b->flow_quantum * quantum_div[host_load] +
- get_random_u16()) >> 16;
+ flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
list_move_tail(&flow->flowchain, &b->old_flows);
goto retry;
@@ -2107,11 +2124,8 @@ retry:
if (flow->set == CAKE_SET_BULK) {
b->bulk_flow_count--;
- if (cake_dsrc(q->flow_mode))
- srchost->srchost_bulk_flow_count--;
-
- if (cake_ddst(q->flow_mode))
- dsthost->dsthost_bulk_flow_count--;
+ cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
b->decaying_flow_count++;
} else if (flow->set == CAKE_SET_SPARSE ||
@@ -2129,12 +2143,8 @@ retry:
else if (flow->set == CAKE_SET_BULK) {
b->bulk_flow_count--;
- if (cake_dsrc(q->flow_mode))
- srchost->srchost_bulk_flow_count--;
-
- if (cake_ddst(q->flow_mode))
- dsthost->dsthost_bulk_flow_count--;
-
+ cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
} else
b->decaying_flow_count--;
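A worked example of the arithmetic that cake_get_flow_quantum() now centralizes, assuming quantum_div[n] is initialized to 65535 / n elsewhere in sch_cake (the concrete numbers are illustrative): with flow_quantum = 1514 and host_load = 3 the fair share is about 504.67 bytes, and the random dither added before the >> 16 makes the truncated result land on 505 roughly two thirds of the time instead of always rounding down to 504:

    u32 prod = 1514 * (65535 / 3);                    /* 1514 * 21845 = 33073330 */
    u16 plain = prod >> 16;                           /* always 504              */
    u16 dithered = (prod + get_random_u16()) >> 16;   /* 505 on ~66% of calls    */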
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index e5a5af343c4c..8e1e97be4df7 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -387,7 +387,8 @@ static struct ctl_table sctp_net_table[] = {
static int proc_sctp_do_hmac_alg(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct net *net = current->nsproxy->net_ns;
+ struct net *net = container_of(ctl->data, struct net,
+ sctp.sctp_hmac_alg);
struct ctl_table tbl;
bool changed = false;
char *none = "none";
@@ -432,7 +433,7 @@ static int proc_sctp_do_hmac_alg(const struct ctl_table *ctl, int write,
static int proc_sctp_do_rto_min(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct net *net = current->nsproxy->net_ns;
+ struct net *net = container_of(ctl->data, struct net, sctp.rto_min);
unsigned int min = *(unsigned int *) ctl->extra1;
unsigned int max = *(unsigned int *) ctl->extra2;
struct ctl_table tbl;
@@ -460,7 +461,7 @@ static int proc_sctp_do_rto_min(const struct ctl_table *ctl, int write,
static int proc_sctp_do_rto_max(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct net *net = current->nsproxy->net_ns;
+ struct net *net = container_of(ctl->data, struct net, sctp.rto_max);
unsigned int min = *(unsigned int *) ctl->extra1;
unsigned int max = *(unsigned int *) ctl->extra2;
struct ctl_table tbl;
@@ -498,7 +499,7 @@ static int proc_sctp_do_alpha_beta(const struct ctl_table *ctl, int write,
static int proc_sctp_do_auth(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct net *net = current->nsproxy->net_ns;
+ struct net *net = container_of(ctl->data, struct net, sctp.auth_enable);
struct ctl_table tbl;
int new_value, ret;
@@ -527,7 +528,7 @@ static int proc_sctp_do_auth(const struct ctl_table *ctl, int write,
static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct net *net = current->nsproxy->net_ns;
+ struct net *net = container_of(ctl->data, struct net, sctp.udp_port);
unsigned int min = *(unsigned int *)ctl->extra1;
unsigned int max = *(unsigned int *)ctl->extra2;
struct ctl_table tbl;
@@ -568,7 +569,8 @@ static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write,
static int proc_sctp_do_probe_interval(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct net *net = current->nsproxy->net_ns;
+ struct net *net = container_of(ctl->data, struct net,
+ sctp.probe_interval);
struct ctl_table tbl;
int ret, new_value;
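The common thread in these handlers (and in the mptcp and rds changes above) is deriving the owning struct net from ctl->data rather than from current->nsproxy, since a write can reach the handler through a file descriptor opened in a different namespace or at a point where current->nsproxy is not a safe reference. The pattern relies on each table entry's .data pointing at a field embedded in struct net, so container_of() can walk back to the containing net. A minimal sketch mirroring the rto_min conversion above, with example_handler() as a hypothetical name and the min/max bookkeeping elided:

    static int example_handler(const struct ctl_table *ctl, int write,
                               void *buffer, size_t *lenp, loff_t *ppos)
    {
            /* ctl->data points at net->sctp.rto_min for this table entry. */
            struct net *net = container_of(ctl->data, struct net, sctp.rto_min);

            /* ... validate against net->sctp state instead of
             * current->nsproxy->net_ns ...
             */
            return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
    }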
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index bbf26cc4f6ee..7bcc9b4408a2 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -458,7 +458,7 @@ int tls_tx_records(struct sock *sk, int flags)
tx_err:
if (rc < 0 && rc != -EAGAIN)
- tls_err_abort(sk, -EBADMSG);
+ tls_err_abort(sk, rc);
return rc;
}
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 5cf8109f672a..fa9d1b49599b 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -491,6 +491,15 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
*/
vsk->transport->release(vsk);
vsock_deassign_transport(vsk);
+
+ /* transport's release() and destruct() can touch some socket
+ * state. Since we are reassigning the socket to a new transport
+ * during vsock_connect(), let's reset these fields to have a
+ * clean state.
+ */
+ sock_reset_flag(sk, SOCK_DONE);
+ sk->sk_state = TCP_CLOSE;
+ vsk->peer_shutdown = 0;
}
/* We increase the module refcnt to prevent the transport unloading
@@ -870,6 +879,9 @@ EXPORT_SYMBOL_GPL(vsock_create_connected);
s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
+ if (WARN_ON(!vsk->transport))
+ return 0;
+
return vsk->transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);
@@ -878,6 +890,9 @@ s64 vsock_connectible_has_data(struct vsock_sock *vsk)
{
struct sock *sk = sk_vsock(vsk);
+ if (WARN_ON(!vsk->transport))
+ return 0;
+
if (sk->sk_type == SOCK_SEQPACKET)
return vsk->transport->seqpacket_has_data(vsk);
else
@@ -887,6 +902,9 @@ EXPORT_SYMBOL_GPL(vsock_connectible_has_data);
s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
+ if (WARN_ON(!vsk->transport))
+ return 0;
+
return vsk->transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);
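
The accessors above only call into the transport when one is assigned and return a safe default otherwise, since the transport can be dropped while a connect is re-assigning it. A small userspace sketch of that guard pattern, with toy type names:

#include <stdio.h>

/* Toy analogue of the vsock accessor: ops are invoked only when a transport
 * is present; otherwise a safe default is returned instead of a NULL deref.
 */
struct toy_transport {
    long (*stream_has_data)(void);
};

struct toy_vsock {
    const struct toy_transport *transport;
};

static long toy_stream_has_data(const struct toy_vsock *vsk)
{
    if (!vsk->transport) {
        fprintf(stderr, "WARN: no transport assigned\n");
        return 0;
    }
    return vsk->transport->stream_has_data();
}

static long always_five(void) { return 5; }

int main(void)
{
    const struct toy_transport t = { .stream_has_data = always_five };
    struct toy_vsock with = { .transport = &t };
    struct toy_vsock without = { .transport = NULL };

    printf("%ld %ld\n", toy_stream_has_data(&with),
           toy_stream_has_data(&without));
    return 0;
}
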
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 9acc13ab3f82..7f7de6d88096 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -26,6 +26,9 @@
/* Threshold for detecting small packets to copy */
#define GOOD_COPY_LEN 128
+static void virtio_transport_cancel_close_work(struct vsock_sock *vsk,
+ bool cancel_timeout);
+
static const struct virtio_transport *
virtio_transport_get_ops(struct vsock_sock *vsk)
{
@@ -1109,6 +1112,8 @@ void virtio_transport_destruct(struct vsock_sock *vsk)
{
struct virtio_vsock_sock *vvs = vsk->trans;
+ virtio_transport_cancel_close_work(vsk, true);
+
kfree(vvs);
vsk->trans = NULL;
}
@@ -1204,17 +1209,11 @@ static void virtio_transport_wait_close(struct sock *sk, long timeout)
}
}
-static void virtio_transport_do_close(struct vsock_sock *vsk,
- bool cancel_timeout)
+static void virtio_transport_cancel_close_work(struct vsock_sock *vsk,
+ bool cancel_timeout)
{
struct sock *sk = sk_vsock(vsk);
- sock_set_flag(sk, SOCK_DONE);
- vsk->peer_shutdown = SHUTDOWN_MASK;
- if (vsock_stream_has_data(vsk) <= 0)
- sk->sk_state = TCP_CLOSING;
- sk->sk_state_change(sk);
-
if (vsk->close_work_scheduled &&
(!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
vsk->close_work_scheduled = false;
@@ -1226,6 +1225,20 @@ static void virtio_transport_do_close(struct vsock_sock *vsk,
}
}
+static void virtio_transport_do_close(struct vsock_sock *vsk,
+ bool cancel_timeout)
+{
+ struct sock *sk = sk_vsock(vsk);
+
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ if (vsock_stream_has_data(vsk) <= 0)
+ sk->sk_state = TCP_CLOSING;
+ sk->sk_state_change(sk);
+
+ virtio_transport_cancel_close_work(vsk, cancel_timeout);
+}
+
static void virtio_transport_close_timeout(struct work_struct *work)
{
struct vsock_sock *vsk =
@@ -1628,8 +1641,11 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
lock_sock(sk);
- /* Check if sk has been closed before lock_sock */
- if (sock_flag(sk, SOCK_DONE)) {
+ /* Check if sk has been closed or assigned to another transport before
+ * lock_sock (note: listener sockets are not assigned to any transport)
+ */
+ if (sock_flag(sk, SOCK_DONE) ||
+ (sk->sk_state != TCP_LISTEN && vsk->transport != &t->transport)) {
(void)virtio_transport_reset_no_sock(t, skb);
release_sock(sk);
sock_put(sk);
diff --git a/net/vmw_vsock/vsock_bpf.c b/net/vmw_vsock/vsock_bpf.c
index 4aa6e74ec295..f201d9eca1df 100644
--- a/net/vmw_vsock/vsock_bpf.c
+++ b/net/vmw_vsock/vsock_bpf.c
@@ -77,6 +77,7 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
size_t len, int flags, int *addr_len)
{
struct sk_psock *psock;
+ struct vsock_sock *vsk;
int copied;
psock = sk_psock_get(sk);
@@ -84,6 +85,13 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
return __vsock_recvmsg(sk, msg, len, flags);
lock_sock(sk);
+ vsk = vsock_sk(sk);
+
+ if (!vsk->transport) {
+ copied = -ENODEV;
+ goto out;
+ }
+
if (vsock_has_data(sk, psock) && sk_psock_queue_empty(psock)) {
release_sock(sk);
sk_psock_put(sk, psock);
@@ -108,6 +116,7 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
}
+out:
release_sock(sk);
sk_psock_put(sk, psock);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 3fa70286c846..89d2bef96469 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -322,7 +322,6 @@ static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
return -ENOSPC;
}
- sk_mark_napi_id_once_xdp(&xs->sk, xdp);
return 0;
}
@@ -908,11 +907,8 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
if (unlikely(!xs->tx))
return -ENOBUFS;
- if (sk_can_busy_loop(sk)) {
- if (xs->zc)
- __sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
+ if (sk_can_busy_loop(sk))
sk_busy_loop(sk, 1); /* only support non-blocking sockets */
- }
if (xs->zc && xsk_no_wakeup(sk))
return 0;
@@ -1298,6 +1294,14 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
xs->queue_id = qid;
xp_add_xsk(xs->pool, xs);
+ if (xs->zc && qid < dev->real_num_rx_queues) {
+ struct netdev_rx_queue *rxq;
+
+ rxq = __netif_get_rx_queue(dev, qid);
+ if (rxq->napi)
+ __sk_mark_napi_id_once(sk, rxq->napi->napi_id);
+ }
+
out_unlock:
if (err) {
dev_put(dev);
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index cc0b0af20296..1f2680bcc43a 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -174,13 +174,15 @@ int avc_get_hash_stats(char *page)
* using a linked list for extended_perms_decision lookup because the list is
* always small. i.e. less than 5, typically 1
*/
-static struct extended_perms_decision *avc_xperms_decision_lookup(u8 driver,
- struct avc_xperms_node *xp_node)
+static struct extended_perms_decision *
+avc_xperms_decision_lookup(u8 driver, u8 base_perm,
+ struct avc_xperms_node *xp_node)
{
struct avc_xperms_decision_node *xpd_node;
list_for_each_entry(xpd_node, &xp_node->xpd_head, xpd_list) {
- if (xpd_node->xpd.driver == driver)
+ if (xpd_node->xpd.driver == driver &&
+ xpd_node->xpd.base_perm == base_perm)
return &xpd_node->xpd;
}
return NULL;
@@ -205,11 +207,12 @@ avc_xperms_has_perm(struct extended_perms_decision *xpd,
}
static void avc_xperms_allow_perm(struct avc_xperms_node *xp_node,
- u8 driver, u8 perm)
+ u8 driver, u8 base_perm, u8 perm)
{
struct extended_perms_decision *xpd;
security_xperm_set(xp_node->xp.drivers.p, driver);
- xpd = avc_xperms_decision_lookup(driver, xp_node);
+ xp_node->xp.base_perms |= base_perm;
+ xpd = avc_xperms_decision_lookup(driver, base_perm, xp_node);
if (xpd && xpd->allowed)
security_xperm_set(xpd->allowed->p, perm);
}
@@ -245,6 +248,7 @@ static void avc_xperms_free(struct avc_xperms_node *xp_node)
static void avc_copy_xperms_decision(struct extended_perms_decision *dest,
struct extended_perms_decision *src)
{
+ dest->base_perm = src->base_perm;
dest->driver = src->driver;
dest->used = src->used;
if (dest->used & XPERMS_ALLOWED)
@@ -272,6 +276,7 @@ static inline void avc_quick_copy_xperms_decision(u8 perm,
*/
u8 i = perm >> 5;
+ dest->base_perm = src->base_perm;
dest->used = src->used;
if (dest->used & XPERMS_ALLOWED)
dest->allowed->p[i] = src->allowed->p[i];
@@ -357,6 +362,7 @@ static int avc_xperms_populate(struct avc_node *node,
memcpy(dest->xp.drivers.p, src->xp.drivers.p, sizeof(dest->xp.drivers.p));
dest->xp.len = src->xp.len;
+ dest->xp.base_perms = src->xp.base_perms;
/* for each source xpd allocate a destination xpd and copy */
list_for_each_entry(src_xpd, &src->xpd_head, xpd_list) {
@@ -807,6 +813,7 @@ out:
* @event : Updating event
* @perms : Permission mask bits
* @driver: xperm driver information
+ * @base_perm: the base permission associated with the extended permission
* @xperm: xperm permissions
* @ssid: AVC entry source sid
* @tsid: AVC entry target sid
@@ -820,10 +827,9 @@ out:
* otherwise, this function updates the AVC entry. The original AVC-entry object
* will release later by RCU.
*/
-static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
- u32 tsid, u16 tclass, u32 seqno,
- struct extended_perms_decision *xpd,
- u32 flags)
+static int avc_update_node(u32 event, u32 perms, u8 driver, u8 base_perm,
+ u8 xperm, u32 ssid, u32 tsid, u16 tclass, u32 seqno,
+ struct extended_perms_decision *xpd, u32 flags)
{
u32 hvalue;
int rc = 0;
@@ -880,7 +886,7 @@ static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
case AVC_CALLBACK_GRANT:
node->ae.avd.allowed |= perms;
if (node->ae.xp_node && (flags & AVC_EXTENDED_PERMS))
- avc_xperms_allow_perm(node->ae.xp_node, driver, xperm);
+ avc_xperms_allow_perm(node->ae.xp_node, driver, base_perm, xperm);
break;
case AVC_CALLBACK_TRY_REVOKE:
case AVC_CALLBACK_REVOKE:
@@ -987,10 +993,9 @@ static noinline void avc_compute_av(u32 ssid, u32 tsid, u16 tclass,
avc_insert(ssid, tsid, tclass, avd, xp_node);
}
-static noinline int avc_denied(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- u8 driver, u8 xperm, unsigned int flags,
- struct av_decision *avd)
+static noinline int avc_denied(u32 ssid, u32 tsid, u16 tclass, u32 requested,
+ u8 driver, u8 base_perm, u8 xperm,
+ unsigned int flags, struct av_decision *avd)
{
if (flags & AVC_STRICT)
return -EACCES;
@@ -999,7 +1004,7 @@ static noinline int avc_denied(u32 ssid, u32 tsid,
!(avd->flags & AVD_FLAGS_PERMISSIVE))
return -EACCES;
- avc_update_node(AVC_CALLBACK_GRANT, requested, driver,
+ avc_update_node(AVC_CALLBACK_GRANT, requested, driver, base_perm,
xperm, ssid, tsid, tclass, avd->seqno, NULL, flags);
return 0;
}
@@ -1012,7 +1017,8 @@ static noinline int avc_denied(u32 ssid, u32 tsid,
* driver field is used to specify which set contains the permission.
*/
int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
- u8 driver, u8 xperm, struct common_audit_data *ad)
+ u8 driver, u8 base_perm, u8 xperm,
+ struct common_audit_data *ad)
{
struct avc_node *node;
struct av_decision avd;
@@ -1047,22 +1053,23 @@ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
local_xpd.auditallow = &auditallow;
local_xpd.dontaudit = &dontaudit;
- xpd = avc_xperms_decision_lookup(driver, xp_node);
+ xpd = avc_xperms_decision_lookup(driver, base_perm, xp_node);
if (unlikely(!xpd)) {
/*
* Compute the extended_perms_decision only if the driver
- * is flagged
+ * is flagged and the base permission is known.
*/
- if (!security_xperm_test(xp_node->xp.drivers.p, driver)) {
+ if (!security_xperm_test(xp_node->xp.drivers.p, driver) ||
+ !(xp_node->xp.base_perms & base_perm)) {
avd.allowed &= ~requested;
goto decision;
}
rcu_read_unlock();
- security_compute_xperms_decision(ssid, tsid, tclass,
- driver, &local_xpd);
+ security_compute_xperms_decision(ssid, tsid, tclass, driver,
+ base_perm, &local_xpd);
rcu_read_lock();
- avc_update_node(AVC_CALLBACK_ADD_XPERMS, requested,
- driver, xperm, ssid, tsid, tclass, avd.seqno,
+ avc_update_node(AVC_CALLBACK_ADD_XPERMS, requested, driver,
+ base_perm, xperm, ssid, tsid, tclass, avd.seqno,
&local_xpd, 0);
} else {
avc_quick_copy_xperms_decision(xperm, &local_xpd, xpd);
@@ -1075,8 +1082,8 @@ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
decision:
denied = requested & ~(avd.allowed);
if (unlikely(denied))
- rc = avc_denied(ssid, tsid, tclass, requested,
- driver, xperm, AVC_EXTENDED_PERMS, &avd);
+ rc = avc_denied(ssid, tsid, tclass, requested, driver,
+ base_perm, xperm, AVC_EXTENDED_PERMS, &avd);
rcu_read_unlock();
@@ -1110,7 +1117,7 @@ static noinline int avc_perm_nonode(u32 ssid, u32 tsid, u16 tclass,
avc_compute_av(ssid, tsid, tclass, avd, &xp_node);
denied = requested & ~(avd->allowed);
if (unlikely(denied))
- return avc_denied(ssid, tsid, tclass, requested, 0, 0,
+ return avc_denied(ssid, tsid, tclass, requested, 0, 0, 0,
flags, avd);
return 0;
}
@@ -1158,7 +1165,7 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
rcu_read_unlock();
if (unlikely(denied))
- return avc_denied(ssid, tsid, tclass, requested, 0, 0,
+ return avc_denied(ssid, tsid, tclass, requested, 0, 0, 0,
flags, avd);
return 0;
}
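
The lookup above is now keyed on both the driver byte and the base permission, so ioctl and nlmsg extended permissions that happen to share a driver value no longer alias each other in the cache. A minimal sketch of such a two-key lookup over a small list, with made-up entries and flag names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical cache entry: a decision is identified by the driver byte
 * and the base permission together, so two xperm classes sharing a driver
 * value do not collide.
 */
struct xpd_entry {
    uint8_t driver;
    uint8_t base_perm;
    uint32_t allowed;
};

#define EXT_IOCTL (1 << 0)
#define EXT_NLMSG (1 << 1)

static const struct xpd_entry cache[] = {
    { .driver = 0x12, .base_perm = EXT_IOCTL, .allowed = 0x0001 },
    { .driver = 0x12, .base_perm = EXT_NLMSG, .allowed = 0x00ff },
};

static const struct xpd_entry *lookup(uint8_t driver, uint8_t base_perm)
{
    for (size_t i = 0; i < sizeof(cache) / sizeof(cache[0]); i++)
        if (cache[i].driver == driver && cache[i].base_perm == base_perm)
            return &cache[i];
    return NULL;
}

int main(void)
{
    const struct xpd_entry *e = lookup(0x12, EXT_NLMSG);

    printf("allowed=0x%04x\n", e ? e->allowed : 0);
    return 0;
}
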
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 366c87a40bd1..171dd7fceac5 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3688,8 +3688,8 @@ static int ioctl_has_perm(const struct cred *cred, struct file *file,
return 0;
isec = inode_security(inode);
- rc = avc_has_extended_perms(ssid, isec->sid, isec->sclass,
- requested, driver, xperm, &ad);
+ rc = avc_has_extended_perms(ssid, isec->sid, isec->sclass, requested,
+ driver, AVC_EXT_IOCTL, xperm, &ad);
out:
return rc;
}
@@ -5952,7 +5952,7 @@ static int nlmsg_sock_has_extended_perms(struct sock *sk, u32 perms, u16 nlmsg_t
xperm = nlmsg_type & 0xff;
return avc_has_extended_perms(current_sid(), sksec->sid, sksec->sclass,
- perms, driver, xperm, &ad);
+ perms, driver, AVC_EXT_NLMSG, xperm, &ad);
}
static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 96a614d47df8..281f40103663 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -136,8 +136,11 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested,
int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, u32 requested,
struct common_audit_data *auditdata);
+#define AVC_EXT_IOCTL (1 << 0) /* Cache entry for an ioctl extended permission */
+#define AVC_EXT_NLMSG (1 << 1) /* Cache entry for an nlmsg extended permission */
int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
- u8 driver, u8 perm, struct common_audit_data *ad);
+ u8 driver, u8 base_perm, u8 perm,
+ struct common_audit_data *ad);
u32 avc_policy_seqno(void);
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index c7f2731abd03..700bd6c8bb38 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -239,6 +239,7 @@ struct extended_perms_data {
struct extended_perms_decision {
u8 used;
u8 driver;
+ u8 base_perm;
struct extended_perms_data *allowed;
struct extended_perms_data *auditallow;
struct extended_perms_data *dontaudit;
@@ -246,6 +247,7 @@ struct extended_perms_decision {
struct extended_perms {
u16 len; /* length associated decision chain */
+ u8 base_perms; /* which base permissions are covered */
struct extended_perms_data drivers; /* flag drivers that are used */
};
@@ -257,6 +259,7 @@ void security_compute_av(u32 ssid, u32 tsid, u16 tclass,
struct extended_perms *xperms);
void security_compute_xperms_decision(u32 ssid, u32 tsid, u16 tclass, u8 driver,
+ u8 base_perm,
struct extended_perms_decision *xpermd);
void security_compute_av_user(u32 ssid, u32 tsid, u16 tclass,
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 3d5c563cfc4c..d9f58b5d0f49 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -582,7 +582,7 @@ static void type_attribute_bounds_av(struct policydb *policydb,
}
/*
- * Flag which drivers have permissions.
+ * Flag which drivers have permissions and which base permissions are covered.
*/
void services_compute_xperms_drivers(
struct extended_perms *xperms,
@@ -592,12 +592,19 @@ void services_compute_xperms_drivers(
switch (node->datum.u.xperms->specified) {
case AVTAB_XPERMS_IOCTLDRIVER:
+ xperms->base_perms |= AVC_EXT_IOCTL;
/* if one or more driver has all permissions allowed */
for (i = 0; i < ARRAY_SIZE(xperms->drivers.p); i++)
xperms->drivers.p[i] |= node->datum.u.xperms->perms.p[i];
break;
case AVTAB_XPERMS_IOCTLFUNCTION:
+ xperms->base_perms |= AVC_EXT_IOCTL;
+ /* if allowing permissions within a driver */
+ security_xperm_set(xperms->drivers.p,
+ node->datum.u.xperms->driver);
+ break;
case AVTAB_XPERMS_NLMSG:
+ xperms->base_perms |= AVC_EXT_NLMSG;
/* if allowing permissions within a driver */
security_xperm_set(xperms->drivers.p,
node->datum.u.xperms->driver);
@@ -631,8 +638,7 @@ static void context_struct_compute_av(struct policydb *policydb,
avd->auditallow = 0;
avd->auditdeny = 0xffffffff;
if (xperms) {
- memset(&xperms->drivers, 0, sizeof(xperms->drivers));
- xperms->len = 0;
+ memset(xperms, 0, sizeof(*xperms));
}
if (unlikely(!tclass || tclass > policydb->p_classes.nprim)) {
@@ -969,13 +975,19 @@ void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
{
switch (node->datum.u.xperms->specified) {
case AVTAB_XPERMS_IOCTLFUNCTION:
- case AVTAB_XPERMS_NLMSG:
- if (xpermd->driver != node->datum.u.xperms->driver)
+ if (xpermd->base_perm != AVC_EXT_IOCTL ||
+ xpermd->driver != node->datum.u.xperms->driver)
return;
break;
case AVTAB_XPERMS_IOCTLDRIVER:
- if (!security_xperm_test(node->datum.u.xperms->perms.p,
- xpermd->driver))
+ if (xpermd->base_perm != AVC_EXT_IOCTL ||
+ !security_xperm_test(node->datum.u.xperms->perms.p,
+ xpermd->driver))
+ return;
+ break;
+ case AVTAB_XPERMS_NLMSG:
+ if (xpermd->base_perm != AVC_EXT_NLMSG ||
+ xpermd->driver != node->datum.u.xperms->driver)
return;
break;
default:
@@ -1010,6 +1022,7 @@ void security_compute_xperms_decision(u32 ssid,
u32 tsid,
u16 orig_tclass,
u8 driver,
+ u8 base_perm,
struct extended_perms_decision *xpermd)
{
struct selinux_policy *policy;
@@ -1023,6 +1036,7 @@ void security_compute_xperms_decision(u32 ssid,
struct ebitmap_node *snode, *tnode;
unsigned int i, j;
+ xpermd->base_perm = base_perm;
xpermd->driver = driver;
xpermd->used = 0;
memset(xpermd->allowed->p, 0, sizeof(xpermd->allowed->p));
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b74b566f675e..ad66378d7321 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -10641,6 +10641,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1e1f, "ASUS Vivobook 15 X1504VAP", ALC2XX_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
+ SND_PCI_QUIRK(0x1043, 0x1e63, "ASUS H7606W", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1),
+ SND_PCI_QUIRK(0x1043, 0x1e83, "ASUS GA605W", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1),
SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1eb3, "ASUS Ally RCLA72", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x1ed3, "ASUS HN7306W", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10930,8 +10932,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x38e0, "Yoga Y990 Intel VECO Dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38f8, "Yoga Book 9i", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38df, "Y990 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
- SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+ SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x38fd, "ThinkBook plus Gen5 Hybrid", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC),
@@ -10995,6 +10997,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13),
SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 0f2df7c91e18..0b9e87dc2b6c 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -2451,6 +2451,7 @@ config SND_SOC_WM8993
config SND_SOC_WM8994
tristate
+ depends on MFD_WM8994
config SND_SOC_WM8995
tristate
diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
index 4236f78beec0..83c21c17fb80 100644
--- a/sound/soc/codecs/cs42l43.c
+++ b/sound/soc/codecs/cs42l43.c
@@ -2404,6 +2404,7 @@ static int cs42l43_codec_runtime_resume(struct device *dev)
static const struct dev_pm_ops cs42l43_codec_pm_ops = {
RUNTIME_PM_OPS(NULL, cs42l43_codec_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
static const struct platform_device_id cs42l43_codec_id_table[] = {
diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
index 61729e5b50a8..f508df01145b 100644
--- a/sound/soc/codecs/es8316.c
+++ b/sound/soc/codecs/es8316.c
@@ -39,7 +39,9 @@ struct es8316_priv {
struct snd_soc_jack *jack;
int irq;
unsigned int sysclk;
- unsigned int allowed_rates[ARRAY_SIZE(supported_mclk_lrck_ratios)];
+ /* ES83xx supports halving the MCLK so it supports twice as many rates
+ */
+ unsigned int allowed_rates[ARRAY_SIZE(supported_mclk_lrck_ratios) * 2];
struct snd_pcm_hw_constraint_list sysclk_constraints;
bool jd_inverted;
};
@@ -386,6 +388,12 @@ static int es8316_set_dai_sysclk(struct snd_soc_dai *codec_dai,
if (freq % ratio == 0)
es8316->allowed_rates[count++] = freq / ratio;
+
+ /* We also check if the halved MCLK produces a valid rate
+ * since the codec supports halving the MCLK.
+ */
+ if ((freq / ratio) % 2 == 0)
+ es8316->allowed_rates[count++] = freq / ratio / 2;
}
if (count) {
diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
index a5603b617688..b06eead7e0f6 100644
--- a/sound/soc/codecs/es8326.c
+++ b/sound/soc/codecs/es8326.c
@@ -616,7 +616,7 @@ static int es8326_mute(struct snd_soc_dai *dai, int mute, int direction)
0x0F, 0x0F);
if (es8326->version > ES8326_VERSION_B) {
regmap_update_bits(es8326->regmap, ES8326_VMIDSEL, 0x40, 0x40);
- regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x70, 0x10);
+ regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x70, 0x30);
}
}
} else {
@@ -631,6 +631,8 @@ static int es8326_mute(struct snd_soc_dai *dai, int mute, int direction)
regmap_write(es8326->regmap, ES8326_HPR_OFFSET_INI, offset_r);
es8326->calibrated = true;
}
+ regmap_update_bits(es8326->regmap, ES8326_CLK_INV, 0xc0, 0x00);
+ regmap_update_bits(es8326->regmap, ES8326_CLK_MUX, 0x80, 0x00);
if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
regmap_update_bits(es8326->regmap, ES8326_DAC_DSM, 0x01, 0x01);
usleep_range(1000, 5000);
@@ -645,7 +647,7 @@ static int es8326_mute(struct snd_soc_dai *dai, int mute, int direction)
} else {
msleep(300);
if (es8326->version > ES8326_VERSION_B) {
- regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x70, 0x50);
+ regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x70, 0x70);
regmap_update_bits(es8326->regmap, ES8326_VMIDSEL, 0x40, 0x00);
}
regmap_update_bits(es8326->regmap, ES8326_ADC_MUTE,
@@ -676,6 +678,10 @@ static int es8326_set_bias_level(struct snd_soc_component *codec,
regmap_write(es8326->regmap, ES8326_ANA_PDN, 0x00);
regmap_update_bits(es8326->regmap, ES8326_CLK_CTL, 0x20, 0x20);
regmap_update_bits(es8326->regmap, ES8326_RESET, 0x02, 0x00);
+ if (es8326->version > ES8326_VERSION_B) {
+ regmap_update_bits(es8326->regmap, ES8326_VMIDSEL, 0x40, 0x40);
+ regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x70, 0x30);
+ }
break;
case SND_SOC_BIAS_PREPARE:
break;
@@ -683,6 +689,12 @@ static int es8326_set_bias_level(struct snd_soc_component *codec,
regmap_write(es8326->regmap, ES8326_ANA_PDN, 0x3b);
regmap_update_bits(es8326->regmap, ES8326_CLK_CTL, 0x20, 0x00);
regmap_write(es8326->regmap, ES8326_SDINOUT1_IO, ES8326_IO_INPUT);
+ if (es8326->version > ES8326_VERSION_B) {
+ regmap_update_bits(es8326->regmap, ES8326_VMIDSEL, 0x40, 0x40);
+ regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x70, 0x10);
+ }
+ regmap_update_bits(es8326->regmap, ES8326_CLK_INV, 0xc0, 0xc0);
+ regmap_update_bits(es8326->regmap, ES8326_CLK_MUX, 0x80, 0x80);
break;
case SND_SOC_BIAS_OFF:
clk_disable_unprepare(es8326->mclk);
@@ -773,7 +785,10 @@ static void es8326_jack_button_handler(struct work_struct *work)
case 0x6f:
case 0x4b:
/* button volume up */
- cur_button = SND_JACK_BTN_1;
+ if ((iface == 0x6f) && (es8326->version > ES8326_VERSION_B))
+ cur_button = SND_JACK_BTN_0;
+ else
+ cur_button = SND_JACK_BTN_1;
break;
case 0x27:
/* button volume down */
@@ -1082,7 +1097,7 @@ static void es8326_init(struct snd_soc_component *component)
regmap_write(es8326->regmap, ES8326_ADC2_SRC, 0x66);
es8326_disable_micbias(es8326->component);
if (es8326->version > ES8326_VERSION_B) {
- regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x73, 0x13);
+ regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x73, 0x10);
regmap_update_bits(es8326->regmap, ES8326_VMIDSEL, 0x40, 0x40);
}
diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c
index fb8cd2284fe8..728bf78ae71f 100644
--- a/sound/soc/codecs/tas2781-i2c.c
+++ b/sound/soc/codecs/tas2781-i2c.c
@@ -78,7 +78,7 @@ static const struct bulk_reg_val tas2781_cali_start_reg[] = {
X2781_CL_STT_VAL(TAS2781_PRM_INT_MASK_REG, 0xfe, false),
X2781_CL_STT_VAL(TAS2781_PRM_CLK_CFG_REG, 0xdd, false),
X2781_CL_STT_VAL(TAS2781_PRM_RSVD_REG, 0x20, false),
- X2781_CL_STT_VAL(TAS2781_PRM_TEST_57_REG, 0x14, false),
+ X2781_CL_STT_VAL(TAS2781_PRM_TEST_57_REG, 0x14, true),
X2781_CL_STT_VAL(TAS2781_PRM_TEST_62_REG, 0x45, true),
X2781_CL_STT_VAL(TAS2781_PRM_PVDD_UVLO_REG, 0x03, false),
X2781_CL_STT_VAL(TAS2781_PRM_CHNL_0_REG, 0xa8, false),
diff --git a/sound/soc/renesas/rcar/adg.c b/sound/soc/renesas/rcar/adg.c
index 0f190abf00e7..191f212d338c 100644
--- a/sound/soc/renesas/rcar/adg.c
+++ b/sound/soc/renesas/rcar/adg.c
@@ -374,12 +374,12 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
return 0;
}
-void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
+int rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
{
struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
struct rsnd_mod *adg_mod = rsnd_mod_get(adg);
struct clk *clk;
- int i;
+ int ret = 0, i;
if (enable) {
rsnd_mod_bset(adg_mod, BRGCKR, 0x80770000, adg->ckr);
@@ -389,18 +389,33 @@ void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
for_each_rsnd_clkin(clk, adg, i) {
if (enable) {
- clk_prepare_enable(clk);
+ ret = clk_prepare_enable(clk);
/*
* We shouldn't use clk_get_rate() under
* atomic context. Let's keep it when
* rsnd_adg_clk_enable() was called
*/
+ if (ret < 0)
+ break;
+
adg->clkin_rate[i] = clk_get_rate(clk);
} else {
- clk_disable_unprepare(clk);
+ if (adg->clkin_rate[i])
+ clk_disable_unprepare(clk);
+
+ adg->clkin_rate[i] = 0;
}
}
+
+ /*
+	 * rsnd_adg_clk_enable() might return an error (_disable() will not).
+	 * We need to roll back in that case.
+ */
+ if (ret < 0)
+ rsnd_adg_clk_disable(priv);
+
+ return ret;
}
static struct clk *rsnd_adg_create_null_clk(struct rsnd_priv *priv,
@@ -753,7 +768,10 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
if (ret)
return ret;
- rsnd_adg_clk_enable(priv);
+ ret = rsnd_adg_clk_enable(priv);
+ if (ret)
+ return ret;
+
rsnd_adg_clk_dbg_info(priv, NULL);
return 0;
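
The enable path above can now fail partway through the clock array, so it rolls back the clocks that were already enabled before propagating the error. A compact userspace sketch of that enable-with-rollback pattern, with a simulated failure and toy helper names:

#include <stdio.h>

/* Toy clock array standing in for the ADG clock inputs: enable them in
 * order and undo the ones already enabled if one fails.
 */
#define NUM_CLKS 3

static int enable_clk(int i)
{
    if (i == 2)
        return -1;      /* simulate a failure on the third clock */
    printf("clk%d enabled\n", i);
    return 0;
}

static void disable_clk(int i)
{
    printf("clk%d disabled\n", i);
}

static int enable_all(int enabled[NUM_CLKS])
{
    int ret = 0;

    for (int i = 0; i < NUM_CLKS; i++) {
        ret = enable_clk(i);
        if (ret < 0)
            break;
        enabled[i] = 1;
    }

    /* Roll back whatever was already enabled before returning the error. */
    if (ret < 0)
        for (int i = 0; i < NUM_CLKS; i++)
            if (enabled[i]) {
                disable_clk(i);
                enabled[i] = 0;
            }

    return ret;
}

int main(void)
{
    int enabled[NUM_CLKS] = { 0 };

    return enable_all(enabled) < 0 ? 1 : 0;
}
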
diff --git a/sound/soc/renesas/rcar/core.c b/sound/soc/renesas/rcar/core.c
index e2234928c9e8..d3709fd0409e 100644
--- a/sound/soc/renesas/rcar/core.c
+++ b/sound/soc/renesas/rcar/core.c
@@ -2086,9 +2086,7 @@ static int __maybe_unused rsnd_resume(struct device *dev)
{
struct rsnd_priv *priv = dev_get_drvdata(dev);
- rsnd_adg_clk_enable(priv);
-
- return 0;
+ return rsnd_adg_clk_enable(priv);
}
static const struct dev_pm_ops rsnd_pm_ops = {
diff --git a/sound/soc/renesas/rcar/rsnd.h b/sound/soc/renesas/rcar/rsnd.h
index 3c164d8e3b16..a5f54b65313c 100644
--- a/sound/soc/renesas/rcar/rsnd.h
+++ b/sound/soc/renesas/rcar/rsnd.h
@@ -608,7 +608,7 @@ int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *cmd_mod,
struct rsnd_dai_stream *io);
#define rsnd_adg_clk_enable(priv) rsnd_adg_clk_control(priv, 1)
#define rsnd_adg_clk_disable(priv) rsnd_adg_clk_control(priv, 0)
-void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable);
+int rsnd_adg_clk_control(struct rsnd_priv *priv, int enable);
void rsnd_adg_clk_dbg_info(struct rsnd_priv *priv, struct seq_file *m);
/*
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 4b1ea7b2c796..60b4b7b75215 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -127,8 +127,9 @@ config SND_SOC_SAMSUNG_TM2_WM5110
config SND_SOC_SAMSUNG_ARIES_WM8994
tristate "SoC I2S Audio support for WM8994 on Aries"
- depends on SND_SOC_SAMSUNG && MFD_WM8994 && IIO && EXTCON
+ depends on SND_SOC_SAMSUNG && I2C && IIO && EXTCON
select SND_SOC_BT_SCO
+ select MFD_WM8994
select SND_SOC_WM8994
select SND_SAMSUNG_I2S
help
@@ -140,8 +141,9 @@ config SND_SOC_SAMSUNG_ARIES_WM8994
config SND_SOC_SAMSUNG_MIDAS_WM1811
tristate "SoC I2S Audio support for Midas boards"
- depends on SND_SOC_SAMSUNG && IIO
+ depends on SND_SOC_SAMSUNG && I2C && IIO
select SND_SAMSUNG_I2S
+ select MFD_WM8994
select SND_SOC_WM8994
help
Say Y if you want to add support for SoC audio on the Midas boards.
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index d8201c4b1520..6750fdb42564 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -2384,6 +2384,17 @@ def print_kernel_family_struct_src(family, cw):
if not kernel_can_gen_family_struct(family):
return
+ if 'sock-priv' in family.kernel_family:
+ # Generate "trampolines" to make CFI happy
+ cw.write_func("static void", f"__{family.c_name}_nl_sock_priv_init",
+ [f"{family.c_name}_nl_sock_priv_init(priv);"],
+ ["void *priv"])
+ cw.nl()
+ cw.write_func("static void", f"__{family.c_name}_nl_sock_priv_destroy",
+ [f"{family.c_name}_nl_sock_priv_destroy(priv);"],
+ ["void *priv"])
+ cw.nl()
+
cw.block_start(f"struct genl_family {family.ident_name}_nl_family __ro_after_init =")
cw.p('.name\t\t= ' + family.fam_key + ',')
cw.p('.version\t= ' + family.ver_key + ',')
@@ -2401,9 +2412,8 @@ def print_kernel_family_struct_src(family, cw):
cw.p(f'.n_mcgrps\t= ARRAY_SIZE({family.c_name}_nl_mcgrps),')
if 'sock-priv' in family.kernel_family:
cw.p(f'.sock_priv_size\t= sizeof({family.kernel_family["sock-priv"]}),')
- # Force cast here, actual helpers take pointer to the real type.
- cw.p(f'.sock_priv_init\t= (void *){family.c_name}_nl_sock_priv_init,')
- cw.p(f'.sock_priv_destroy = (void *){family.c_name}_nl_sock_priv_destroy,')
+ cw.p(f'.sock_priv_init\t= __{family.c_name}_nl_sock_priv_init,')
+ cw.p(f'.sock_priv_destroy = __{family.c_name}_nl_sock_priv_destroy,')
cw.block_end(';')
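
The generated trampolines keep the callback prototypes exactly as the genl_family table declares them, which is what kernel CFI verifies at the indirect call site; the old force-cast made the caller's and callee's signatures disagree. A small C sketch of the idea, with hypothetical names:

#include <stdio.h>

/* Hypothetical per-socket private state and its typed init helper. */
struct demo_priv {
    int refs;
};

static void demo_priv_init(struct demo_priv *priv)
{
    priv->refs = 1;
}

/* The table expects a callback taking void *. Calling the typed helper
 * through a casted pointer would make the signatures disagree (what CFI
 * rejects); a trampoline with the exact expected prototype keeps both
 * sides consistent.
 */
static void __demo_priv_init(void *priv)
{
    demo_priv_init(priv);
}

struct demo_family {
    void (*sock_priv_init)(void *priv);
};

int main(void)
{
    struct demo_family fam = { .sock_priv_init = __demo_priv_init };
    struct demo_priv p;

    fam.sock_priv_init(&p);
    printf("refs=%d\n", p.refs);
    return 0;
}
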
diff --git a/tools/testing/selftests/cgroup/test_cpuset_prs.sh b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
index 03c1bdaed2c3..400a696a0d21 100755
--- a/tools/testing/selftests/cgroup/test_cpuset_prs.sh
+++ b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
@@ -86,15 +86,15 @@ echo "" > test/cpuset.cpus
#
# If isolated CPUs have been reserved at boot time (as shown in
-# cpuset.cpus.isolated), these isolated CPUs should be outside of CPUs 0-7
+# cpuset.cpus.isolated), these isolated CPUs should be outside of CPUs 0-8
# that will be used by this script for testing purpose. If not, some of
-# the tests may fail incorrectly. These isolated CPUs will also be removed
-# before being compared with the expected results.
+# the tests may fail incorrectly. These pre-isolated CPUs should stay in
+# an isolated state throughout the testing process for now.
#
BOOT_ISOLCPUS=$(cat $CGROUP2/cpuset.cpus.isolated)
if [[ -n "$BOOT_ISOLCPUS" ]]
then
- [[ $(echo $BOOT_ISOLCPUS | sed -e "s/[,-].*//") -le 7 ]] &&
+ [[ $(echo $BOOT_ISOLCPUS | sed -e "s/[,-].*//") -le 8 ]] &&
skip_test "Pre-isolated CPUs ($BOOT_ISOLCPUS) overlap CPUs to be tested"
echo "Pre-isolated CPUs: $BOOT_ISOLCPUS"
fi
@@ -684,14 +684,18 @@ check_isolcpus()
fi
#
+ # Appending pre-isolated CPUs
+	# Even though CPU #8 isn't used for testing, it must not be
+	# pre-isolated, so that appending those CPUs stays simple.
+ #
+ [[ -n "$BOOT_ISOLCPUS" ]] && {
+ EXPECT_VAL=${EXPECT_VAL:+${EXPECT_VAL},}${BOOT_ISOLCPUS}
+ EXPECT_VAL2=${EXPECT_VAL2:+${EXPECT_VAL2},}${BOOT_ISOLCPUS}
+ }
+
+ #
# Check cpuset.cpus.isolated cpumask
#
- if [[ -z "$BOOT_ISOLCPUS" ]]
- then
- ISOLCPUS=$(cat $ISCPUS)
- else
- ISOLCPUS=$(cat $ISCPUS | sed -e "s/,*$BOOT_ISOLCPUS//")
- fi
[[ "$EXPECT_VAL2" != "$ISOLCPUS" ]] && {
# Take a 50ms pause and try again
pause 0.05
@@ -731,8 +735,6 @@ check_isolcpus()
fi
done
[[ "$ISOLCPUS" = *- ]] && ISOLCPUS=${ISOLCPUS}$LASTISOLCPU
- [[ -n "BOOT_ISOLCPUS" ]] &&
- ISOLCPUS=$(echo $ISOLCPUS | sed -e "s/,*$BOOT_ISOLCPUS//")
[[ "$EXPECT_VAL" = "$ISOLCPUS" ]]
}
@@ -836,8 +838,11 @@ run_state_test()
# if available
[[ -n "$ICPUS" ]] && {
check_isolcpus $ICPUS
- [[ $? -ne 0 ]] && test_fail $I "isolated CPU" \
- "Expect $ICPUS, get $ISOLCPUS instead"
+ [[ $? -ne 0 ]] && {
+ [[ -n "$BOOT_ISOLCPUS" ]] && ICPUS=${ICPUS},${BOOT_ISOLCPUS}
+ test_fail $I "isolated CPU" \
+ "Expect $ICPUS, get $ISOLCPUS instead"
+ }
}
reset_cgroup_states
#
diff --git a/tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh b/tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh
index fd13c8cfb7a8..b411fe66510f 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh
@@ -58,9 +58,12 @@ for root in mq mqprio; do
ethtool -L $NDEV combined 4
n_child_assert 4 "One real queue, rest default"
- # Graft some
- tcq replace parent 100:1 handle 204:
- n_child_assert 3 "Grafted"
+ # Remove real one
+ tcq del parent 100:4 handle 204:
+
+ # Replace default with pfifo
+ tcq replace parent 100:1 handle 205: pfifo limit 1000
+ n_child_assert 3 "Deleting real one, replacing default one with pfifo"
ethtool -L $NDEV combined 1
n_child_assert 1 "Grafted, one"
diff --git a/tools/testing/selftests/kvm/aarch64/set_id_regs.c b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
index a79b7f18452d..3a97c160b5fe 100644
--- a/tools/testing/selftests/kvm/aarch64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
@@ -152,7 +152,6 @@ static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ASIDBITS, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0),
REG_FTR_END,
};
diff --git a/tools/testing/selftests/kvm/s390x/ucontrol_test.c b/tools/testing/selftests/kvm/s390x/ucontrol_test.c
index 0c112319dab1..135ee22856cf 100644
--- a/tools/testing/selftests/kvm/s390x/ucontrol_test.c
+++ b/tools/testing/selftests/kvm/s390x/ucontrol_test.c
@@ -210,10 +210,13 @@ TEST_F(uc_kvm, uc_attr_mem_limit)
struct kvm_device_attr attr = {
.group = KVM_S390_VM_MEM_CTRL,
.attr = KVM_S390_VM_MEM_LIMIT_SIZE,
- .addr = (unsigned long)&limit,
+ .addr = (u64)&limit,
};
int rc;
+ rc = ioctl(self->vm_fd, KVM_HAS_DEVICE_ATTR, &attr);
+ EXPECT_EQ(0, rc);
+
rc = ioctl(self->vm_fd, KVM_GET_DEVICE_ATTR, &attr);
EXPECT_EQ(0, rc);
EXPECT_EQ(~0UL, limit);
@@ -635,4 +638,171 @@ TEST_F(uc_kvm, uc_skey)
uc_assert_diag44(self);
}
+static char uc_flic_b[PAGE_SIZE];
+static struct kvm_s390_io_adapter uc_flic_ioa = { .id = 0 };
+static struct kvm_s390_io_adapter_req uc_flic_ioam = { .id = 0 };
+static struct kvm_s390_ais_req uc_flic_asim = { .isc = 0 };
+static struct kvm_s390_ais_all uc_flic_asima = { .simm = 0 };
+static struct uc_flic_attr_test {
+ char *name;
+ struct kvm_device_attr a;
+ int hasrc;
+ int geterrno;
+ int seterrno;
+} uc_flic_attr_tests[] = {
+ {
+ .name = "KVM_DEV_FLIC_GET_ALL_IRQS",
+ .seterrno = EINVAL,
+ .a = {
+ .group = KVM_DEV_FLIC_GET_ALL_IRQS,
+ .addr = (u64)&uc_flic_b,
+ .attr = PAGE_SIZE,
+ },
+ },
+ {
+ .name = "KVM_DEV_FLIC_ENQUEUE",
+ .geterrno = EINVAL,
+ .a = { .group = KVM_DEV_FLIC_ENQUEUE, },
+ },
+ {
+ .name = "KVM_DEV_FLIC_CLEAR_IRQS",
+ .geterrno = EINVAL,
+ .a = { .group = KVM_DEV_FLIC_CLEAR_IRQS, },
+ },
+ {
+ .name = "KVM_DEV_FLIC_ADAPTER_REGISTER",
+ .geterrno = EINVAL,
+ .a = {
+ .group = KVM_DEV_FLIC_ADAPTER_REGISTER,
+ .addr = (u64)&uc_flic_ioa,
+ },
+ },
+ {
+ .name = "KVM_DEV_FLIC_ADAPTER_MODIFY",
+ .geterrno = EINVAL,
+ .seterrno = EINVAL,
+ .a = {
+ .group = KVM_DEV_FLIC_ADAPTER_MODIFY,
+ .addr = (u64)&uc_flic_ioam,
+ .attr = sizeof(uc_flic_ioam),
+ },
+ },
+ {
+ .name = "KVM_DEV_FLIC_CLEAR_IO_IRQ",
+ .geterrno = EINVAL,
+ .seterrno = EINVAL,
+ .a = {
+ .group = KVM_DEV_FLIC_CLEAR_IO_IRQ,
+ .attr = 32,
+ },
+ },
+ {
+ .name = "KVM_DEV_FLIC_AISM",
+ .geterrno = EINVAL,
+ .seterrno = ENOTSUP,
+ .a = {
+ .group = KVM_DEV_FLIC_AISM,
+ .addr = (u64)&uc_flic_asim,
+ },
+ },
+ {
+ .name = "KVM_DEV_FLIC_AIRQ_INJECT",
+ .geterrno = EINVAL,
+ .a = { .group = KVM_DEV_FLIC_AIRQ_INJECT, },
+ },
+ {
+ .name = "KVM_DEV_FLIC_AISM_ALL",
+ .geterrno = ENOTSUP,
+ .seterrno = ENOTSUP,
+ .a = {
+ .group = KVM_DEV_FLIC_AISM_ALL,
+ .addr = (u64)&uc_flic_asima,
+ .attr = sizeof(uc_flic_asima),
+ },
+ },
+ {
+ .name = "KVM_DEV_FLIC_APF_ENABLE",
+ .geterrno = EINVAL,
+ .seterrno = EINVAL,
+ .a = { .group = KVM_DEV_FLIC_APF_ENABLE, },
+ },
+ {
+ .name = "KVM_DEV_FLIC_APF_DISABLE_WAIT",
+ .geterrno = EINVAL,
+ .seterrno = EINVAL,
+ .a = { .group = KVM_DEV_FLIC_APF_DISABLE_WAIT, },
+ },
+};
+
+TEST_F(uc_kvm, uc_flic_attrs)
+{
+ struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };
+ struct kvm_device_attr attr;
+ u64 value;
+ int rc, i;
+
+ rc = ioctl(self->vm_fd, KVM_CREATE_DEVICE, &cd);
+ ASSERT_EQ(0, rc) TH_LOG("create device failed with err %s (%i)",
+ strerror(errno), errno);
+
+ for (i = 0; i < ARRAY_SIZE(uc_flic_attr_tests); i++) {
+ TH_LOG("test %s", uc_flic_attr_tests[i].name);
+ attr = (struct kvm_device_attr) {
+ .group = uc_flic_attr_tests[i].a.group,
+ .attr = uc_flic_attr_tests[i].a.attr,
+ .addr = uc_flic_attr_tests[i].a.addr,
+ };
+ if (attr.addr == 0)
+ attr.addr = (u64)&value;
+
+ rc = ioctl(cd.fd, KVM_HAS_DEVICE_ATTR, &attr);
+ EXPECT_EQ(uc_flic_attr_tests[i].hasrc, !!rc)
+ TH_LOG("expected dev attr missing %s",
+ uc_flic_attr_tests[i].name);
+
+ rc = ioctl(cd.fd, KVM_GET_DEVICE_ATTR, &attr);
+ EXPECT_EQ(!!uc_flic_attr_tests[i].geterrno, !!rc)
+ TH_LOG("get dev attr rc not expected on %s %s (%i)",
+ uc_flic_attr_tests[i].name,
+ strerror(errno), errno);
+ if (uc_flic_attr_tests[i].geterrno)
+ EXPECT_EQ(uc_flic_attr_tests[i].geterrno, errno)
+ TH_LOG("get dev attr errno not expected on %s %s (%i)",
+ uc_flic_attr_tests[i].name,
+ strerror(errno), errno);
+
+ rc = ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
+ EXPECT_EQ(!!uc_flic_attr_tests[i].seterrno, !!rc)
+ TH_LOG("set sev attr rc not expected on %s %s (%i)",
+ uc_flic_attr_tests[i].name,
+ strerror(errno), errno);
+ if (uc_flic_attr_tests[i].seterrno)
+ EXPECT_EQ(uc_flic_attr_tests[i].seterrno, errno)
+ TH_LOG("set dev attr errno not expected on %s %s (%i)",
+ uc_flic_attr_tests[i].name,
+ strerror(errno), errno);
+ }
+
+ close(cd.fd);
+}
+
+TEST_F(uc_kvm, uc_set_gsi_routing)
+{
+ struct kvm_irq_routing *routing = kvm_gsi_routing_create();
+ struct kvm_irq_routing_entry ue = {
+ .type = KVM_IRQ_ROUTING_S390_ADAPTER,
+ .gsi = 1,
+ .u.adapter = (struct kvm_irq_routing_s390_adapter) {
+ .ind_addr = 0,
+ },
+ };
+ int rc;
+
+ routing->entries[0] = ue;
+ routing->nr = 1;
+ rc = ioctl(self->vm_fd, KVM_SET_GSI_ROUTING, routing);
+ ASSERT_EQ(-1, rc) TH_LOG("err %s (%i)", strerror(errno), errno);
+ ASSERT_EQ(EINVAL, errno) TH_LOG("err %s (%i)", strerror(errno), errno);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index 4209b9569039..414addef9a45 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -25,6 +25,8 @@
#include <sys/types.h>
#include <sys/mman.h>
+#include <arpa/inet.h>
+
#include <netdb.h>
#include <netinet/in.h>
@@ -1211,23 +1213,42 @@ static void parse_setsock_options(const char *name)
exit(1);
}
-void xdisconnect(int fd, int addrlen)
+void xdisconnect(int fd)
{
- struct sockaddr_storage empty;
+ socklen_t addrlen = sizeof(struct sockaddr_storage);
+ struct sockaddr_storage addr, empty;
int msec_sleep = 10;
- int queued = 1;
- int i;
+ void *raw_addr;
+ int i, cmdlen;
+ char cmd[128];
+
+ /* get the local address and convert it to string */
+ if (getsockname(fd, (struct sockaddr *)&addr, &addrlen) < 0)
+ xerror("getsockname");
+
+ if (addr.ss_family == AF_INET)
+ raw_addr = &(((struct sockaddr_in *)&addr)->sin_addr);
+ else if (addr.ss_family == AF_INET6)
+ raw_addr = &(((struct sockaddr_in6 *)&addr)->sin6_addr);
+ else
+ xerror("bad family");
+
+ strcpy(cmd, "ss -M | grep -q ");
+ cmdlen = strlen(cmd);
+ if (!inet_ntop(addr.ss_family, raw_addr, &cmd[cmdlen],
+ sizeof(cmd) - cmdlen))
+ xerror("inet_ntop");
shutdown(fd, SHUT_WR);
- /* while until the pending data is completely flushed, the later
+ /*
+	 * wait until the pending data is completely flushed and all
+	 * the MPTCP sockets have reached the closed state.
* disconnect will bypass/ignore/drop any pending data.
*/
for (i = 0; ; i += msec_sleep) {
- if (ioctl(fd, SIOCOUTQ, &queued) < 0)
- xerror("can't query out socket queue: %d", errno);
-
- if (!queued)
+ /* closed socket are not listed by 'ss' */
+ if (system(cmd) != 0)
break;
if (i > poll_timeout)
@@ -1281,9 +1302,9 @@ again:
return ret;
if (cfg_truncate > 0) {
- xdisconnect(fd, peer->ai_addrlen);
+ xdisconnect(fd);
} else if (--cfg_repeat > 0) {
- xdisconnect(fd, peer->ai_addrlen);
+ xdisconnect(fd);
/* the socket could be unblocking at this point, we need the
* connect to be blocking
diff --git a/tools/testing/selftests/riscv/abi/pointer_masking.c b/tools/testing/selftests/riscv/abi/pointer_masking.c
index dee41b7ee3e3..059d2e87eb1f 100644
--- a/tools/testing/selftests/riscv/abi/pointer_masking.c
+++ b/tools/testing/selftests/riscv/abi/pointer_masking.c
@@ -185,8 +185,20 @@ static void test_fork_exec(void)
}
}
+static bool pwrite_wrapper(int fd, void *buf, size_t count, const char *msg)
+{
+ int ret = pwrite(fd, buf, count, 0);
+
+ if (ret != count) {
+ ksft_perror(msg);
+ return false;
+ }
+ return true;
+}
+
static void test_tagged_addr_abi_sysctl(void)
{
+ char *err_pwrite_msg = "failed to write to /proc/sys/abi/tagged_addr_disabled\n";
char value;
int fd;
@@ -200,14 +212,18 @@ static void test_tagged_addr_abi_sysctl(void)
}
value = '1';
- pwrite(fd, &value, 1, 0);
- ksft_test_result(set_tagged_addr_ctrl(min_pmlen, true) == -EINVAL,
- "sysctl disabled\n");
+ if (!pwrite_wrapper(fd, &value, 1, "write '1'"))
+ ksft_test_result_fail(err_pwrite_msg);
+ else
+ ksft_test_result(set_tagged_addr_ctrl(min_pmlen, true) == -EINVAL,
+ "sysctl disabled\n");
value = '0';
- pwrite(fd, &value, 1, 0);
- ksft_test_result(set_tagged_addr_ctrl(min_pmlen, true) == 0,
- "sysctl enabled\n");
+ if (!pwrite_wrapper(fd, &value, 1, "write '0'"))
+ ksft_test_result_fail(err_pwrite_msg);
+ else
+ ksft_test_result(set_tagged_addr_ctrl(min_pmlen, true) == 0,
+ "sysctl enabled\n");
set_tagged_addr_ctrl(0, false);
diff --git a/tools/testing/selftests/riscv/vector/v_initval_nolibc.c b/tools/testing/selftests/riscv/vector/v_initval_nolibc.c
index 1dd94197da30..6174ffe016dc 100644
--- a/tools/testing/selftests/riscv/vector/v_initval_nolibc.c
+++ b/tools/testing/selftests/riscv/vector/v_initval_nolibc.c
@@ -25,6 +25,8 @@ int main(void)
unsigned long vl;
char *datap, *tmp;
+ ksft_set_plan(1);
+
datap = malloc(MAX_VSIZE);
if (!datap) {
ksft_test_result_fail("fail to allocate memory for size = %d\n", MAX_VSIZE);
@@ -63,6 +65,8 @@ int main(void)
}
free(datap);
+
+ ksft_test_result_pass("tests for v_initval_nolibc pass\n");
ksft_exit_pass();
return 0;
}
diff --git a/tools/testing/selftests/riscv/vector/vstate_prctl.c b/tools/testing/selftests/riscv/vector/vstate_prctl.c
index 895177f6bf4c..40b3bffcbb40 100644
--- a/tools/testing/selftests/riscv/vector/vstate_prctl.c
+++ b/tools/testing/selftests/riscv/vector/vstate_prctl.c
@@ -76,6 +76,8 @@ int main(void)
long flag, expected;
long rc;
+ ksft_set_plan(1);
+
pair.key = RISCV_HWPROBE_KEY_IMA_EXT_0;
rc = riscv_hwprobe(&pair, 1, 0, NULL, 0);
if (rc < 0) {
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json b/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json
index 996448afe31b..91d120548bf5 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json
@@ -78,10 +78,10 @@
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 protocol ip flow map key dst rshift 0xff",
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 protocol ip flow map key dst rshift 0x1f",
"expExitCode": "0",
"verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol ip prio 1 flow",
- "matchPattern": "filter parent ffff: protocol ip pref 1 flow chain [0-9]+ handle 0x1 map keys dst rshift 255 baseclass",
+ "matchPattern": "filter parent ffff: protocol ip pref 1 flow chain [0-9]+ handle 0x1 map keys dst rshift 31 baseclass",
"matchCount": "1",
"teardown": [
"$TC qdisc del dev $DEV1 ingress"